Example #1
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1. Prepare test environment, destroy or suspend a VM.
    2. Perform virsh setvcpus operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    vm_ref = params.get("setvcpus_vm_ref", "name")
    status_error = (params.get("status_error", "no") == "yes")
    convert_err = "Can't convert {0} to integer type"
    current_vcpu = params.get("setvcpus_current", "1")
    try:
        current_vcpu = int(current_vcpu)
    except ValueError:
        test.error(convert_err.format(current_vcpu))
    max_vcpu = params.get("setvcpus_max", "4")
    try:
        max_vcpu = int(max_vcpu)
    except ValueError:
        test.error(convert_err.format(max_vcpu))
    try:
        count = params.get("setvcpus_count", "")
        if count:
            count = eval(count)
        count = int(count)
    except ValueError:
        # 'count' may not be a valid number in negative tests
        logging.debug(convert_err.format(count))

    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    remote_user = params.get("remote_user", "root")
    remote_uri = params.get("remote_uri")
    tmpxml = os.path.join(data_dir.get_tmp_dir(), 'tmp.xml')
    topology_correction = "yes" == params.get("topology_correction", "yes")
    with_topology = "yes" == params.get("with_topology", "yes")
    update_maxmum_config = "yes" == params.get("update_maxmum_config", "no")
    no_acpi = "yes" == params.get("no_acpi", "no")
    # virsh start the vm after destroying it
    restart_vm = "yes" == params.get("restart_vm", "no")
    # reboot the vm
    vm_reboot = "yes" == params.get("vm_reboot", "no")
    hot_unplug = "yes" == params.get('hot_unplug', "no")
    hotplugin_count = params.get("hotplugin_count")
    result = True

    # Early death 1.1
    if remote_uri:
        if remote_ip.count("EXAMPLE.COM"):
            test.cancel("remote ip parameters not set.")
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

    # Early death 1.2
    option_list = options.split(" ")
    for item in option_list:
        if virsh.has_command_help_match(command, item) is None:
            test.cancel("The current libvirt version"
                        " doesn't support '%s' option" % item)

    # Init expect vcpu count values
    exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                'cur_config': current_vcpu, 'cur_live': current_vcpu,
                'guest_live': current_vcpu}

    def set_expected(vm, options):
        """
        Set the expected vcpu numbers

        :param vm: vm object
        :param options: setvcpus options
        """
        if ("config" in options) or ("current" in options and vm.is_dead()):
            if "maximum" in options:
                exp_vcpu["max_config"] = count
            else:
                exp_vcpu['cur_config'] = count
        if ("live" in options) or ("current" in options and vm.is_alive()):
            if "maximum" in options:
                exp_vcpu['max_live'] = count
            else:
                exp_vcpu['cur_live'] = count
                exp_vcpu['guest_live'] = count
        if options == '':
            # when none given it defaults to live
            exp_vcpu['cur_live'] = count
            exp_vcpu['guest_live'] = count

    # Save original configuration
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    # Normal processing of the test is to set the maximum vcpu count to 4,
    # and set the current vcpu count to 1, then adjust the 'count' value to
    # plug or unplug vcpus.
    #
    # This is generally fine when the guest is not running; however, the
    # hotswap functionality hasn't always worked very well and has been
    # undergoing lots of change, from using the HMP "cpu_set" command in
    # qemu 1.5 to the new QMP "cpu-add" command added in 1.6, where "cpu_set"
    # seems to have been deprecated, making things very messy.
    #
    # To further muddy the waters, the "cpu-add" functionality is only
    # supported for specific machine type versions. For the purposes of this
    # test that would be "pc-i440fx-1.5" or "pc-q35-1.5" or later machine
    # types (from the guest XML "<os><type ... machine='...'>...</type></os>").
    # Depending on which version of qemu/kvm was used to initially
    # create/generate the XML for the machine, a newer qemu could still be
    # using a 1.4 or earlier machine type.
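    # For example, a machine type new enough for "cpu-add" would appear in
    # the guest XML roughly as (values illustrative):
    #   <os>
    #     <type arch='x86_64' machine='pc-i440fx-1.5'>hvm</type>
    #   </os>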
    #
    try:
        # remove acpi features if need
        if no_acpi:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vmxml.xmltreefile.find('features') is not None:
                vmxml_feature = vmxml.features
                if vmxml_feature.has_feature('acpi'):
                    vmxml_feature.remove_feature('acpi')
                    vmxml.features = vmxml_feature
                    vmxml.sync()

        # Set maximum vcpus, so we can run all kinds of normal tests without
        # encountering a 'requested vcpus greater than max allowable vcpus'
        # error
        topology = vmxml.get_cpu_topology()
        if topology and ("config" in options and "maximum" in options) and not status_error:
            # https://bugzilla.redhat.com/show_bug.cgi?id=1426220
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            del vmxml.cpu
            vmxml.sync()

        # If no topology exists, create a new one.
        if not topology and with_topology and status_error:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            try:
                vmcpu_xml = vmxml['cpu']
            except xcepts.LibvirtXMLNotFoundError:
                logging.debug("Can not find any cpu tag, now create one.")
                vmcpu_xml = VMCPUXML()
            cores = vmxml['vcpu']
            vmcpu_xml['topology'] = {'sockets': 1,
                                     'cores': cores,
                                     'threads': 1}
            vmxml['cpu'] = vmcpu_xml
            vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
                           topology_correction=topology_correction)

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)

        # Get the number of cpus, current value if set, and machine type
        cpu_xml_data = cpu.get_cpu_xmldata(vm, options)
        logging.debug("Before run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", cpu_xml_data['vcpu'],
                      cpu_xml_data['current_vcpu'], cpu_xml_data['mtype'])

        # Ensure the VM is running; the pre_vm_state handling below may stop it again
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        if cpu_xml_data['vcpu'] == 1 and count == 1:
            logging.debug("Original vCPU count is 1, just checking if setvcpus "
                          "can still set current.")

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        # Run test
        if vm_ref == "name":
            dom_option = vm_name
        elif vm_ref == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif vm_ref == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = vm_ref

        if remote_uri:
            status = virsh.setvcpus(dom_option, "1", "--config",
                                    ignore_status=True, debug=True, uri=remote_uri)
        else:
            if update_maxmum_config:
                virsh.setvcpus(vm_name, count_option, options + " --maximum",
                               ignore_status=False, debug=True)
                set_expected(vm, options + " --maximum")

            if hot_unplug and hotplugin_count:
                virsh.setvcpus(vm_name, hotplugin_count, "",
                               ignore_status=False, debug=True)

            if vm_reboot:
                vm.reboot()

            status = virsh.setvcpus(dom_option, count_option, options,
                                    ignore_status=True, debug=True)
            if not status_error:
                if restart_vm:
                    if vm.is_alive():
                        vm.destroy()
                    vm.start()
                    vm.wait_for_login().close()
                    set_expected(vm, re.sub("--config", "", options))
                    set_expected(vm, options + " live")

                set_expected(vm, options)
                result = cpu.check_vcpu_value(vm, exp_vcpu,
                                              option=options)
        setvcpu_exit_status = status.exit_status
        setvcpu_exit_stderr = status.stderr.strip()

    finally:
        cpu_xml_data = cpu.get_cpu_xmldata(vm, options)
        logging.debug("After run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", cpu_xml_data['vcpu'],
                      cpu_xml_data['current_vcpu'], cpu_xml_data['mtype'])

        # Cleanup
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()
        if os.path.exists(tmpxml):
            os.remove(tmpxml)

    # check status_error
    if status_error:
        if setvcpu_exit_status == 0:
            test.fail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         setvcpu_exit_stderr):
                test.cancel("guest <os> machine property '%s' "
                            "may be too old to allow hotplug." % cpu_xml_data['mtype'])

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure.  In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         setvcpu_exit_stderr):
                test.cancel("virsh setvcpu hotplug unsupported, "
                            " mtype=%s" % cpu_xml_data['mtype'])

            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command mtype=%s"
                      " stderr=%s" % (cpu_xml_data['mtype'], setvcpu_exit_stderr))
        else:
            if not result:
                test.fail("Test Failed")
Example #2
    def set_condition(vm_name, condn, reset=False, guestbt=None):
        """
        Set domain to given state or reset it.
        """
        bt = None
        if not reset:
            if condn == "avocado_test":
                testlist = utils_test.get_avocadotestlist(params)
                bt = utils_test.run_avocado_bg(vm, params, test, testlist)
                if not bt:
                    test.cancel("guest stress failed to start")
                # Allow stress to start
                time.sleep(condn_sleep_sec)
                return bt
            elif condn == "stress":
                utils_test.load_stress("stress_in_vms",
                                       params=params,
                                       vms=[vm])
            elif condn in ["save", "managedsave"]:
                # No action
                pass
            elif condn == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name,
                                        max_vcpu,
                                        "--live",
                                        ignore_status=True,
                                        debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {
                    'max_config': max_vcpu,
                    'max_live': max_vcpu,
                    'cur_config': current_vcpu,
                    'cur_live': max_vcpu,
                    'guest_live': max_vcpu
                }
                result = cpu.check_vcpu_value(vm, exp_vcpu, option="--live")
            elif condn == "host_smt":
                if cpuutil.get_cpu_vendor_name() == 'power9':
                    result = process.run("ppc64_cpu --smt=4", shell=True)
                else:
                    test.cancel(
                        "Host SMT changes not allowed during guest live")
            else:
                logging.debug("No operation for the domain")

        else:
            if condn == "save":
                save_file = os.path.join(data_dir.get_tmp_dir(),
                                         vm_name + ".save")
                result = virsh.save(vm_name,
                                    save_file,
                                    ignore_status=True,
                                    debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                if os.path.exists(save_file):
                    result = virsh.restore(save_file,
                                           ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif condn == "managedsave":
                result = virsh.managedsave(vm_name,
                                           ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "avocado_test":
                guestbt.join()
            elif condn == "stress":
                utils_test.unload_stress("stress_in_vms",
                                         params=params,
                                         vms=[vm])
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name,
                                        current_vcpu,
                                        "--live",
                                        ignore_status=True,
                                        debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {
                    'max_config': max_vcpu,
                    'max_live': current_vcpu,
                    'cur_config': current_vcpu,
                    'cur_live': current_vcpu,
                    'guest_live': current_vcpu
                }
                result = cpu.check_vcpu_value(vm, exp_vcpu, option="--live")
            elif condn == "host_smt":
                result = process.run("ppc64_cpu --smt=2", shell=True)
                # Change back the host smt
                result = process.run("ppc64_cpu --smt=4", shell=True)
                # Workaround for a known cgroup issue after a cpu hot(un)plug
                # sequence
                root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
                machine_cpuset_paths = []
                if os.path.isdir(
                        os.path.join(root_cpuset_path, "machine.slice")):
                    machine_cpuset_paths.append(
                        os.path.join(root_cpuset_path, "machine.slice"))
                if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                    machine_cpuset_paths.append(
                        os.path.join(root_cpuset_path, "machine"))
                if not machine_cpuset_paths:
                    logging.warning("cgroup cpuset might not recover properly "
                                    "for guests after host smt changes, "
                                    "restore it manually")
                root_cpuset_cpus = os.path.join(root_cpuset_path,
                                                "cpuset.cpus")
                for path in machine_cpuset_paths:
                    machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                    # check if file content differs
                    cmd = "diff %s %s" % (root_cpuset_cpus,
                                          machine_cpuset_cpus)
                    if process.system(cmd, verbose=True, ignore_status=True):
                        cmd = "cp %s %s" % (root_cpuset_cpus,
                                            machine_cpuset_cpus)
                        process.system(cmd, verbose=True)

            else:
                logging.debug("No need recover the domain")
        return bt
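
set_condition pairs each setup call with a later reset call, and the background-task handle it returns for "avocado_test" must be handed back through guestbt. A sketch of the intended calling sequence, assuming it is invoked from the enclosing test where vm_name is defined:

bt = set_condition(vm_name, "stress")                       # apply the condition
# ... check vcpu values while the condition is active ...
set_condition(vm_name, "stress", reset=True, guestbt=bt)    # undo it
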
Example #3
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-agent if needed.
    2. Checking for vcpu numbers in vcpucount, vcpuinfo, domain xml,
       vcpupin and inside domain.
    3. Plug vcpu for the domain.
    4. Repeat step 2 to check again.
    5. Control domain (save, managedsave, s3, s4, etc.).
    6. Repeat step 2 to check again.
    7. Recover domain (restore, wakeup, etc.).
    8. Repeat step 2 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 2 to check again.
    11. Repeat step 5 to control domain (as BZ#1088216 is not fixed, skip
        save/managedsave related actions).
    12. Repeat step 2 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 2 to check again.
    15. Recover test environment.
    """

    def manipulate_domain(vm_name, vm_operation, recover=False):
        """
        Operate domain to given state or recover it.

        :params vm_name: Name of the VM domain
        :params vm_operation: Operation to be performed on VM domain
                              like save, managedsave, suspend
        :params recover: flag to inform whether to set or reset
                         vm_operation
        """
        global vm_uptime_init
        save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
        if not recover:
            if vm_operation == "save":
                save_option = ""
                result = virsh.save(vm_name, save_file, save_option,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "managedsave":
                managedsave_option = ""
                result = virsh.managedsave(vm_name, managedsave_option,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s4":
                suspend_target = "disk"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # Wait domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
            elif vm_operation == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                vm.reboot()
                vm_uptime_init = vm.uptime()
            else:
                logging.debug("No operation for the domain")

        else:
            if vm_operation == "save":
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif vm_operation in ["managedsave", "s4"]:
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmwakeup(vm_name, ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                pass
            else:
                logging.debug("No need recover the domain")

    def online_new_vcpu(vm, vcpu_plug_num):
        """
        For Fedora/RHEL7 guests, udev cannot online hot-added CPUs
        automatically (refer to BZ#968811 for details), so enable them manually.

        :params vm: VM object
        :params vcpu_plug_num: Hotplugged vcpu count
        """
        cpu_is_online = []
        session = vm.wait_for_login()
        for i in range(1, int(vcpu_plug_num)):
            cpu_is_online.append(False)
            cpu = "/sys/devices/system/cpu/cpu%s/online" % i
            cmd_s, cmd_o = session.cmd_status_output("cat %s" % cpu)
            logging.debug("cmd exist status: %s, cmd output %s", cmd_s, cmd_o)
            if cmd_s != 0:
                logging.error("Can not find cpu %s in domain", i)
            else:
                if cmd_o.strip() == "0":
                    if session.cmd_status("echo 1 > %s" % cpu) == 0:
                        cpu_is_online[i-1] = True
                    else:
                        logging.error("Fail to enable cpu %s online", i)
                else:
                    cpu_is_online[i-1] = True
        session.close()
        return False not in cpu_is_online

    def check_setvcpus_result(cmd_result, expect_error):
        """
        Check command result.

        For setvcpus, skip unsupported operations (vcpu plug or unplug) by
        checking the command stderr.

        :params cmd_result: Command result
        :params expect_error: Whether to expect error True or False
        """
        if cmd_result.exit_status != 0:
            if expect_error:
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         cmd_result.stderr):
                test.cancel("guest <os> machine property may be too"
                            "  old to allow hotplug")

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure.  In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         cmd_result.stderr):
                test.cancel("Unsupport virsh setvcpu hotplug")

            # Maybe QEMU doesn't support vcpu unplug
            if re.search("Operation not supported: qemu didn't unplug the vCPUs",
                         cmd_result.stderr):
                test.cancel("This qemu does not support vcpu unplug")

            # Qemu guest agent version could be too low
            if re.search("The command guest-get-vcpus has not been found",
                         cmd_result.stderr):
                err_msg = "Your agent version is too low: %s" % cmd_result.stderr
                logging.warning(err_msg)
                test.cancel(err_msg)

            # Attempting to enable more vCPUs in the guest than is currently
            # enabled in the guest but less than the maximum count for the VM
            if re.search("requested vcpu count is greater than the count of "
                         "enabled vcpus in the domain",
                         cmd_result.stderr):
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return

            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command: %s"
                      % cmd_result.stderr)
        else:
            if expect_error:
                test.fail("Expect fail but run successfully")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    global vm_uptime_init
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = int(params.get("vcpu_max_num"))
    vcpu_current_num = int(params.get("vcpu_current_num"))
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = int(params.get("vcpu_plug_num"))
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = int(params.get("vcpu_unplug_num"))
    vcpu_max_timeout = int(params.get("vcpu_max_timeout", "480"))
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")
    with_stress = "yes" == params.get("run_stress", "no")
    iterations = int(params.get("test_itr", 1))
    topology_correction = "yes" == params.get("topology_correction", "no")
    # Init expect vcpu count values
    expect_vcpu_num = {'max_config': vcpu_max_num, 'max_live': vcpu_max_num,
                       'cur_config': vcpu_current_num,
                       'cur_live': vcpu_current_num,
                       'guest_live': vcpu_current_num}
    if check_after_plug_fail:
        expect_vcpu_num_bk = expect_vcpu_num.copy()
    # Init expect vcpu pin values
    expect_vcpupin = {}
    result_failed = 0

    # Init cpu-list for vcpupin
    host_cpu_count = os.sysconf('SC_NPROCESSORS_CONF')
    if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"):
        test.cancel("We need more cpus on host in this case for the cpu-list"
                    "=%s. But current number of cpu on host is %s."
                    % (pin_cpu_list, host_cpu_count))

    cpus_list = cpu_util.cpu_online_list()
    logging.debug("Active cpus in host are %s", cpus_list)

    cpu_seq_str = ""
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    if pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value get from cfg
        pass

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, vcpu_max_num, vcpu_current_num,
                           topology_correction=topology_correction)
        # Do not apply S3/S4 on power
        cpu_arch = platform.machine()
        if cpu_arch in ('x86_64', 'i386', 'i686'):
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()
        vm_uptime_init = vm.uptime()
        if with_stress:
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        for _ in range(iterations):
            if not cpu.check_vcpu_value(vm, expect_vcpu_num):
                logging.error("Expected vcpu check failed")
                result_failed += 1
            # plug vcpu
            if vcpu_plug:
                # Pin vcpu
                if pin_before_plug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True, debug=True)
                check_setvcpus_result(result, status_error)

                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_plug_num
                elif setvcpu_option == "--guest":
                    # setvcpus '--guest' only affects the vcpu number inside the guest
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_plug_num
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                    if not status_error:
                        if not utils_misc.wait_for(lambda: cpu.check_if_vm_vcpu_match(vcpu_plug_num, vm),
                                                   vcpu_max_timeout, text="wait for vcpu online") or not online_new_vcpu(vm, vcpu_plug_num):
                            test.fail("Fail to enable new added cpu")

                # Pin vcpu
                if pin_after_plug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if status_error and check_after_plug_fail:
                    if not cpu.check_vcpu_value(vm, expect_vcpu_num_bk, {}, setvcpu_option):
                        logging.error("Expected vcpu check failed")
                        result_failed += 1

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    if not cpu.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                        logging.error("Expected vcpu check failed")
                        result_failed += 1

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not cpu.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming a domain from S4 may take a long time (QEMU
                    # bug); wait up to 10 minutes and skip the remaining test
                    # steps if the domain does not resume successfully
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.warn("Skip remaining test steps as domain"
                                  " not resume in 10 mins: %s" % e)
                    # For vcpu hotplug/unplug without the '--config' flag,
                    # after suspending the domain to disk (shut off) and
                    # restarting it, the current live vcpu number recovers to
                    # its original value
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_plug_num
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not cpu.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1

            # Unplug vcpu
            # Since QEMU 2.2.0, by default all current vcpus are
            # non-hotpluggable when the VM is started, and vcpu 0 (id=1) must
            # always be present and non-hotpluggable, which means we can't
            # hotunplug these vcpus directly. So we can either hotplug more
            # vcpus before doing the hotunplug, or set the 'hotpluggable'
            # attribute to 'yes' for every vcpu except vcpu 0, so that libvirt
            # can find enough hotpluggable vcpus to reach the desired target
            # vcpu count. As the simpler preparation step, we choose to
            # hotplug more vcpus here.
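            # For illustration, a hotpluggable vcpu shows up in the live
            # guest XML roughly as:
            #   <vcpus>
            #     <vcpu id='1' enabled='yes' hotpluggable='yes' order='2'/>
            #   </vcpus>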
            if vcpu_unplug:
                if setvcpu_option == "--live":
                    logging.info("Hotplug vcpu to the maximum count to make"
                                 "sure all these new plugged vcpus are "
                                 "hotunpluggable")
                    result = virsh.setvcpus(vm_name, vcpu_max_num, '--live',
                                            debug=True)
                    libvirt.check_exit_status(result)
                # Pin vcpu
                if pin_before_unplug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    # As the vcpus will be unplugged later, set
                    # expect_vcpupin to empty
                    expect_vcpupin = {}

                # setvcpus is asynchronous: the command returning does not
                # mean the operation is complete, so the guest vcpu count
                # needs to be polled. When unplugging vcpus from the maximum
                # count down to 1, keep observing after setvcpus returns until
                # the vcpu count stops changing.
                result = virsh.setvcpus(vm_name, vcpu_unplug_num,
                                        setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True, debug=True)
                unsupport_str = cpu.vcpuhotunplug_unsupport_str()
                if unsupport_str and (unsupport_str in result.stderr):
                    test.cancel("Vcpu hotunplug is not supported in this host:"
                                "\n%s" % result.stderr)
                session = None
                try:
                    session = vm.wait_for_login()
                    cmd = "lscpu | grep \"^CPU(s):\""
                    operation = "setvcpus"
                    prev_output = -1
                    while True:
                        ret, output = session.cmd_status_output(cmd)
                        if ret:
                            test.error("Run lscpu failed, output: %s" % output)
                        output = output.split(":")[-1].strip()

                        if int(prev_output) == int(output):
                            break
                        prev_output = output
                        time.sleep(5)
                    logging.debug("CPUs available from inside guest after %s - %s",
                                  operation, output)
                    if int(output) != vcpu_unplug_num:
                        test.fail("CPU %s failed as cpus are not "
                                  "reflected from inside guest" % operation)
                finally:
                    if session:
                        session.close()

                check_setvcpus_result(result, status_error)
                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_unplug_num
                elif setvcpu_option == "--guest":
                    # setvcpus '--guest' only affects the vcpu number inside the guest
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_unplug_num
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num

                # Pin vcpu
                if pin_after_unplug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    if not cpu.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                        logging.error("Expected vcpu check failed")
                        result_failed += 1

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not cpu.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming a domain from S4 may take a long time (QEMU
                    # bug); wait up to 10 minutes and skip the remaining test
                    # steps if the domain does not resume successfully
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.warn("Skip remaining test steps as domain"
                                  " not resume in 10 mins: %s" % e)
                    # For vcpu hotplug/unplug without the '--config' flag,
                    # after suspending the domain to disk (shut off) and
                    # restarting it, the current live vcpu number recovers to
                    # its original value
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_unplug_num
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not cpu.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1
        if vm.uptime() < vm_uptime_init:
            test.fail("Unexpected VM reboot detected in between test")
    # Recover env
    finally:
        if need_mkswap:
            vm.cleanup_swap()
        if with_stress:
            if "bt" in locals() and bt:
                bt.join(ignore_status=True)
        vm.destroy()
        backup_xml.sync()

    if not status_error:
        if result_failed > 0:
            test.fail("Test Failed")
Example #4
def run(test, params, env):
    """
    Test command: virsh setvcpu.

    The command can change the number of virtual CPUs in the guest domain.
    1. Prepare test environment, destroy or suspend a VM.
    2. Perform virsh setvcpu operation.
    3. Check in the following places:
       - vcpuinfo
       - vcpupin
       - vcpucount
       - inside the guest
    4. Recover test environment.
    5. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpu_pre_vm_state")
    options = params.get("setvcpu_options")
    vm_ref = params.get("setvcpu_vm_ref", "name")
    vcpu_list_format = params.get("setvcpu_list_format", "comma")
    iteration = int(params.get("setvcpu_iteration", 1))
    invalid_vcpulist = params.get("invalid_vcpulist", "")
    convert_err = "Can't convert {0} to integer type"
    unsupport_str = params.get("unsupport_str", "")
    # Parse once; keep the raw string so a bad value can be reported
    current_vcpu = params.get("setvcpu_current", "1")
    try:
        current_vcpu = int(current_vcpu)
    except ValueError:
        test.cancel(convert_err.format(current_vcpu))

    max_vcpu = params.get("setvcpu_max", "4")
    try:
        max_vcpu = int(max_vcpu)
    except ValueError:
        test.cancel(convert_err.format(max_vcpu))

    extra_param = params.get("setvcpu_extra_param")
    status_error = params.get("status_error")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    set_topology = "yes" == params.get("set_topology", "no")
    sockets = int(params.get("topology_sockets", '1'))
    cores = int(params.get("topology_cores", '4'))
    threads = int(params.get("topology_threads", '1'))
    start_vm_after_set = "yes" == params.get("start_vm_after_set", "no")
    iteration = int(params.get("hotplug_iteration", "1"))

    # Early death
    option_list = options.split(" ")
    for item in option_list:
        if virsh.has_command_help_match("setvcpu", item) is None:
            test.cancel("The current libvirt "
                        "version doesn't support "
                        "'%s' option" % item)

    # Calculate count options
    vcpu_list = []
    # Init expect vcpu count values
    exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                'cur_config': current_vcpu, 'cur_live': current_vcpu,
                'guest_live': current_vcpu}

    def set_expected(vm, options, enabled=True):
        """
        Set the expected vcpu numbers

        :param vm: vm object
        :param options: setvcpu options
        :param enabled: True or False base on enable or disable
        """
        if enabled:
            if ("config" in options) or ("current" in options and vm.is_dead()):
                exp_vcpu['cur_config'] += threads
            elif ("live" in options) or ("current" in options and vm.is_alive()):
                exp_vcpu['cur_live'] += threads
                exp_vcpu['guest_live'] += threads
            else:
                # when none given it defaults to live
                exp_vcpu['cur_live'] += threads
                exp_vcpu['guest_live'] += threads
        else:
            if ("--config" in options) or ("--current" in options and vm.is_dead()):
                exp_vcpu['cur_config'] -= threads
            elif ("--live" in options) or ("--current" in options and vm.is_alive()):
                exp_vcpu['cur_live'] -= threads
                exp_vcpu['guest_live'] -= threads
            else:
                # when none given it defaults to live
                exp_vcpu['cur_live'] -= threads
                exp_vcpu['guest_live'] -= threads

    if threads > 1:
        start_vcpu = current_vcpu
    else:
        start_vcpu = current_vcpu + 1

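    # Note: 'hypen' (sic) is matched literally; presumably it mirrors the
    # spelling used in the test configuration for the hyphenated range format.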
    if 'hypen' in vcpu_list_format:
        for vcpu_start in range(start_vcpu, max_vcpu, threads):
            if int(threads) > 1:
                lst = "%s-%s" % (
                    str(vcpu_start), str(vcpu_start + threads - 1))
            else:
                lst = vcpu_start
            vcpu_list.append(lst)
    elif 'comma' in vcpu_list_format:
        for vcpu_start in range(start_vcpu, max_vcpu, threads):
            if int(threads) > 1:
                lst = ''
                for idx in range(vcpu_start, vcpu_start + threads):
                    lst += "%s," % idx
            else:
                lst = vcpu_start
            vcpu_list.append(lst.strip(','))
    else:
        pass

    # Early death
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or
                               local_ip.count("EXAMPLE.COM")):
        test.cancel("remote/local ip parameters not set.")

    # Save original configuration
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    # Run test
    try:
        if vm.is_alive():
            vm.destroy()

        # Set cpu topology
        if set_topology:
            vmxml.set_vm_vcpus(vm.name, max_vcpu, current_vcpu,
                               sockets=sockets, cores=cores, threads=threads,
                               add_topology=True)

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        setvcpu_exit_status = 0
        setvcpu_exit_stderr = ''
        # TODO: Run remote test, for future use
        if vm_ref == "remote":
            pass
        # Run local test
        else:
            if vm_ref == "name":
                dom_option = vm_name
            elif vm_ref == "id":
                dom_option = domid
                if params.get("setvcpu_hex_id") is not None:
                    dom_option = hex(int(domid))
                elif params.get("setvcpu_invalid_id") is not None:
                    dom_option = params.get("setvcpu_invalid_id")
            elif vm_ref == "uuid":
                dom_option = domuuid
                if params.get("setvcpu_invalid_uuid") is not None:
                    dom_option = params.get("setvcpu_invalid_uuid")
            else:
                dom_option = vm_ref
            for itr in range(iteration):
                if extra_param:
                    vcpu_list_option = "%s %s" % (vcpu_list[itr], extra_param)
                elif invalid_vcpulist != "":
                    vcpu_list_option = invalid_vcpulist
                else:
                    vcpu_list_option = vcpu_list[itr]
                if 'enable' in options:
                    status = virsh.setvcpu(
                        dom_option, vcpu_list_option, options,
                        ignore_status=True, debug=True)
                    # Preserve the first failure
                    if status.exit_status != 0:
                        setvcpu_exit_status = status.exit_status
                    # Accumulate the error strings
                    setvcpu_exit_stderr += "itr-%d-enable: %s\n" % (itr,
                                                                    status.stderr.strip())
                    set_expected(vm, options, True)
                elif 'disable' in options:
                    # disable needs hotpluggable cpus, so make sure we have some
                    if status_error != "yes":
                        options_enable = options.replace("disable", "enable")
                        virsh.setvcpu(dom_option, vcpu_list_option,
                                      options_enable, ignore_status=False,
                                      debug=True)
                        # Adjust the expected vcpus
                        set_expected(vm, options, True)
                    status = virsh.setvcpu(
                        dom_option, vcpu_list_option, options,
                        ignore_status=True, debug=True)
                    unsupport_str = cpu.vcpuhotunplug_unsupport_str()
                    if unsupport_str and (unsupport_str in status.stderr):
                        test.cancel("Vcpu hotunplug is not supported in this host:"
                                    "\n%s" % status.stderr)
                    # Preserve the first failure
                    if status.exit_status != 0:
                        setvcpu_exit_status = status.exit_status
                    # Accumulate the error strings
                    setvcpu_exit_stderr += "itr-%d-disable: %s\n" % (itr,
                                                                     status.stderr.strip())
                    # Adjust the expected vcpus
                    set_expected(vm, options, False)
                # Handle error cases
                else:
                    status = virsh.setvcpu(dom_option, vcpu_list_option,
                                           options, ignore_status=True,
                                           debug=True)
                    # Preserve the first failure
                    if status.exit_status != 0:
                        setvcpu_exit_status = status.exit_status
                    # Accumulate the error strings
                    setvcpu_exit_stderr += "itr-%d-error: %s\n" % (itr,
                                                                   status.stderr.strip())
            # Start VM after setting vcpus
            if start_vm_after_set:
                if "--enable" in options:
                    if "--config" in options or "--current" in options:
                        exp_vcpu['cur_live'] = exp_vcpu['cur_config']
                if "--disable" in options:
                    if "--config" in options or "--current" in options:
                        exp_vcpu['cur_live'] = exp_vcpu['cur_config']
                if vm.is_alive():
                    logging.debug("VM already started")
                else:
                    result = virsh.start(vm_name, ignore_status=True,
                                         debug=True)
                    libvirt.check_exit_status(result)

            # Let's validate the result in positive cases
            if status_error != "yes":
                result = cpu.check_vcpu_value(vm, exp_vcpu,
                                              option=options)
    finally:
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()

    # check status_error
    if status_error == "yes":
        if setvcpu_exit_status == 0:
            test.fail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command"
                      " stderr=%s" % setvcpu_exit_stderr)
        else:
            if not result:
                test.fail("Test Failed")
Example #5
    def vm_stress_events(self, event, vm, params):
        """
        Stress events

        :param event: event name
        :param vm: vm object
        :param params: test parameters object
        """
        current_vcpu = int(params.get("smp", 2))
        max_vcpu = int(params.get("vcpu_maxcpus", 2))
        iface_num = params.get("iface_num", '1')
        iface_type = params.get("iface_type", "network")
        iface_model = params.get("iface_model", "virtio")
        iface_source = eval(params.get("iface_source", "{'network':'default'}"))
        attach_option = params.get("attach_option", "")
        detach_option = params.get("detach_option", "")
        disk_size = params.get("virt_disk_device_size", "1")
        disk_type = params.get("disk_type", "file")
        disk_device = params.get("disk_device", "disk")
        disk_format = params.get("disk_format", "qcow2")
        device_target = params.get("virt_disk_device_target", "vda").split()
        path = params.get("path", "")
        device_source_names = params.get("virt_disk_device_source", "").split()
        disk_driver = params.get("driver_name", "qemu")
        self.ignore_status = params.get("ignore_status", "no") == "yes"
        dargs = {'ignore_status': True, 'debug': True}
        for itr in range(self.iterations):
            if "vcpupin" in event:
                for vcpu in range(current_vcpu):
                    result = virsh.vcpupin(vm.name, vcpu,
                                           random.choice(self.host_cpu_list),
                                           **dargs)
                    if not self.ignore_status:
                        libvirt.check_exit_status(result)
            elif "emulatorpin" in event:
                result = virsh.emulatorpin(vm.name,
                                           random.choice(self.host_cpu_list),
                                           **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
            elif "suspend" in event:
                result = virsh.suspend(vm.name, **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                time.sleep(self.event_sleep_time)
                result = virsh.resume(vm.name, **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
            elif "cpuhotplug" in event:
                result = virsh.setvcpus(vm.name, max_vcpu, "--live",
                                        **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                    exp_vcpu = {'max_config': max_vcpu,
                                'max_live': max_vcpu,
                                'cur_config': current_vcpu,
                                'cur_live': max_vcpu,
                                'guest_live': max_vcpu}
                    cpuutil.check_vcpu_value(
                        vm, exp_vcpu, option="--live")
                time.sleep(self.event_sleep_time)
                result = virsh.setvcpus(vm.name, current_vcpu, "--live",
                                        **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                    exp_vcpu = {'max_config': max_vcpu,
                                'max_live': max_vcpu,
                                'cur_config': current_vcpu,
                                'cur_live': current_vcpu,
                                'guest_live': current_vcpu}
                    cpuutil.check_vcpu_value(
                        vm, exp_vcpu, option="--live")
            elif "reboot" in event:
                vm.reboot()
            elif "nethotplug" in event:
                for idx in range(int(iface_num)):
                    logging.debug("Try to attach interface %d", idx)
                    mac = utils_net.generate_mac_address_simple()
                    options = ("%s %s --model %s --mac %s %s" %
                               (iface_type, iface_source['network'],
                                iface_model, mac, attach_option))
                    logging.debug("VM name: %s , Options for Network attach: %s", vm.name, options)
                    ret = virsh.attach_interface(vm.name, options,
                                                 ignore_status=True)
                    time.sleep(self.event_sleep_time)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret)
                    if detach_option:
                        options = ("--type %s --mac %s %s" %
                                   (iface_type, mac, detach_option))
                        logging.debug("VM name: %s , Options for Network detach: %s", vm.name, options)
                        ret = virsh.detach_interface(vm.name, options,
                                                     ignore_status=True)
                        if not self.ignore_status:
                            libvirt.check_exit_status(ret)
            elif "diskhotplug" in event:
                for disk_num in range(len(device_source_names)):
                    disk = {}
                    disk_attach_error = False
                    disk_name = os.path.join(path, vm.name, device_source_names[disk_num])
                    device_source = libvirt.create_local_disk(disk_type, disk_name, disk_size, disk_format=disk_format)
                    disk.update({"format": disk_format,
                                 "source": device_source})
                    disk_xml = Disk(disk_type)
                    disk_xml.device = disk_device
                    disk_xml.driver = {"name": disk_driver, "type": disk_format}
                    ret = virsh.attach_disk(vm.name, disk["source"], device_target[disk_num], attach_option, debug=True)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret, disk_attach_error)
                    if detach_option:
                        ret = virsh.detach_disk(vm.name, device_target[disk_num], extra=detach_option)
                        if not self.ignore_status:
                            libvirt.check_exit_status(ret)
                        libvirt.delete_local_disk(disk_type, disk_name)
            else:
                raise NotImplementedError
            time.sleep(self.itr_sleep_time)
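
Stripped of the harness plumbing (self.iterations, the sleep intervals, self.ignore_status), the "cpuhotplug" branch above reduces to a plug/unplug cycle. A minimal sketch under that assumption, with illustrative names:

import time

from virttest import virsh
from virttest.utils_test import libvirt


def cpu_hotplug_cycle(vm_name, current_vcpu, max_vcpu, sleep_secs=10):
    """Hedged sketch: plug vcpus up to max_vcpu, pause, then unplug back."""
    result = virsh.setvcpus(vm_name, max_vcpu, "--live",
                            ignore_status=True, debug=True)
    libvirt.check_exit_status(result)
    time.sleep(sleep_secs)
    result = virsh.setvcpus(vm_name, current_vcpu, "--live",
                            ignore_status=True, debug=True)
    libvirt.check_exit_status(result)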