def initialize(self):
        # Check if the kernel supports cpu hotplug
        if utils.running_config():
            utils.check_for_kernel_feature('HOTPLUG_CPU')

        # Quit if only a single CPU is online; this test needs at least two.
        if utils.count_cpus() == 1:
            e_msg = 'Single CPU online detected, test not supported.'
            raise error.TestNAError(e_msg)

        # Run a simple, quick check first. FIXME: make this check more thorough.
        utils.system('dmesg -c > /dev/null')
        for cpu in utils.cpu_online_map():
            if os.path.isfile('/sys/devices/system/cpu/cpu%s/online' % cpu):
                utils.system('echo 0 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1)
                utils.system('dmesg -c')
                time.sleep(3)
                utils.system('echo 1 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1)
                utils.system('dmesg -c')
                time.sleep(3)
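A minimal standalone sketch of the same sysfs hotplug toggle, shown for clarity; it assumes a Linux host with CONFIG_HOTPLUG_CPU and root privileges, and the helper name toggle_cpu_online is hypothetical:

import os
import time

def toggle_cpu_online(cpu, delay=3):
    """Offline and re-online one CPU via sysfs, as the test above does."""
    path = '/sys/devices/system/cpu/cpu%s/online' % cpu
    if not os.path.isfile(path):
        return  # cpu0 usually has no 'online' file and cannot be unplugged
    with open(path, 'w') as f:
        f.write('0')  # take the cpu offline
    time.sleep(delay)
    with open(path, 'w') as f:
        f.write('1')  # bring it back online
    time.sleep(delay)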
Example #3
def run(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    """

    def affinity_from_vcpuinfo(vm_name, vcpu):
        """
        This function returns list of the vcpu's affinity from
        virsh vcpuinfo output

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        """

        output = virsh.vcpuinfo(vm_name).stdout.rstrip()
        affinity = re.findall('CPU Affinity: +[-y]+', output)
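        # For an assumed 4-pcpu host, a vcpuinfo line such as
        #   CPU Affinity:   yy--
        # yields ['y', 'y', '-', '-']: the vcpu may run on cpus 0 and 1 only.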
        total_affinity = affinity[int(vcpu)].split()[-1].strip()
        actual_affinity = list(total_affinity)
        return actual_affinity

    def check_vcpupin(vm_name, vcpu, cpu_list, pid, vcpu_pid):
        """
        This function checks the actual and the expected affinity of a given
        vcpu and raises an error if they do not match

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :param cpu_list: cpu list to which the vcpu is pinned
        :param pid: VM pid
        :param vcpu_pid: pid of the vcpu thread
        """

        total_cpu = utils.run("ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l").stdout
        expected_output = utils_test.libvirt.cpus_string_to_affinity_list(
            cpu_list,
            int(total_cpu))
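        # e.g. (assumed) cpu_list '0-2,^1' on a 4-cpu host maps to the
        # affinity list ['y', '-', 'y', '-'].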
        logging.debug("Expecte affinity: %s", expected_output)
        actual_output = affinity_from_vcpuinfo(vm_name, vcpu)
        logging.debug("Actual affinity in vcpuinfo output: %s", actual_output)

        if expected_output == actual_output:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            raise error.TestFail("Cpu pinning details not updated properly in"
                                 " virsh vcpuinfo command output")

        if pid is None:
            return
        # Get the actual cpu affinity value in the proc entry
        output = utils_test.libvirt.cpu_allowed_list_by_task(pid, vcpu_pid)
        actual_output_proc = utils_test.libvirt.cpus_string_to_affinity_list(
            output,
            int(total_cpu))
        logging.debug("Actual affinity in guest proc: %s", actual_output_proc)
        if expected_output == actual_output_proc:
            logging.info("successfully pinned vcpu: %s --> cpu: %s"
                         " in respective proc entry", vcpu, cpu_list)
        else:
            raise error.TestFail("Cpu pinning details not updated properly in"
                                 " /proc/%s/task/%s/status" % (pid, vcpu_pid))

    def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
        """
        Run the vcpupin command and then check the result.
        """
        if vm_ref == "name":
            vm_ref = vm.name
        elif vm_ref == "uuid":
            vm_ref = vm.get_uuid()
        # Execute virsh vcpupin command.
        cmdResult = virsh.vcpupin(vm_ref, vcpu, cpu_list, options, debug=True)
        if cmdResult.exit_status:
            if not status_error:
                # Command fail and it is in positive case.
                raise error.TestFail(cmdResult)
            else:
                # Command fail and it is in negative case.
                return
        else:
            if status_error:
                # Command success and it is in negative case.
                raise error.TestFail(cmdResult)
            else:
                # Command success and it is in positive case.
                # "--config" will take effect after VM destroyed.
                pid = None
                vcpu_pid = None
                if options == "--config":
                    virsh.destroy(vm.name)
                else:
                    pid = vm.get_pid()
                    logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
                    vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    def offline_pin_and_check(vm, vcpu, cpu_list):
        """
        Edit domain xml to pin vcpu and check the result.
        """
        cputune = vm_xml.VMCPUTuneXML()
        cputune.vcpupins = [{'vcpu': str(vcpu), 'cpuset': cpu_list}]
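        # The resulting domain XML fragment looks like (illustrative):
        #   <cputune>
        #     <vcpupin vcpu='0' cpuset='0-1'/>
        #   </cputune>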
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        vmxml.cputune = cputune
        vmxml.sync()
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)
        pid = vm.get_pid()
        vcpu_pid = vm.get_vcpus_pid()[vcpu]
        check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    if not virsh.has_help_command('vcpucount'):
        raise error.TestNAError("This version of libvirt doesn't"
                                " support this test")
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    # Get the variables for vcpupin command.
    vm_ref = params.get("vcpupin_vm_ref", "name")
    options = params.get("vcpupin_options", "--current")
    cpu_list = params.get("vcpupin_cpu_list", "x")
    start_vm = ("yes" == params.get("start_vm", "yes"))
    # Get status of this case.
    status_error = ("yes" == params.get("status_error", "no"))

    # Edit domain xml to pin vcpus
    offline_pin = ("yes" == params.get("offline_pin", "no"))

    # Backup for recovery.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Get the guest vcpu count
    if offline_pin:
        vcpucount_option = "--config --active"
    else:
        vcpucount_option = "--live --active"
    guest_vcpu_count = virsh.vcpucount(vm_name,
                                       vcpucount_option).stdout.strip()

    try:
        # Control multi domain vcpu affinity
        multi_dom = ("yes" == params.get("multi_dom_pin", "no"))
        vm2 = None
        if multi_dom:
            vm_names = params.get("vms").split()
            if len(vm_names) > 1:
                vm2 = env.get_vm(vm_names[1])
            else:
                raise error.TestError("Need more than one domains")
            if not vm2:
                raise error.TestNAError("No %s find" % vm_names[1])
            vm2.destroy()
            vm2xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm2.name)
            vm2xml_backup = vm2xml.copy()
            # Make sure vm2 has the same vcpu count as vm
            vm2xml.set_vm_vcpus(vm2.name, int(guest_vcpu_count),
                                int(guest_vcpu_count))
            if start_vm:
                vm2.start()

        # Run cases when guest is shutoff.
        if not offline_pin:
            if vm.is_dead() and not start_vm:
                run_and_check_vcpupin(vm, vm_ref, 0, 0, "")
                return
        # Get the host cpu count
        host_cpu_count = utils.count_cpus()
        cpu_max = int(host_cpu_count) - 1
        if (int(host_cpu_count) < 2) and (cpu_list != "x"):
            raise error.TestNAError("Need more host cpus for cpu_list=%s,"
                                    " but only %s cpu(s) are available."
                                    % (cpu_list, host_cpu_count))

        # Find the alive cpus list
        cpus_list = utils.cpu_online_map()
        logging.info("Active cpus in host are %s", cpus_list)

        # Run test case
        for vcpu in range(int(guest_vcpu_count)):
            if cpu_list == "x":
                for cpu in cpus_list:
                    left_cpus = "0-%s,^%s" % (cpu_max, cpu)
                    if offline_pin:
                        offline_pin_and_check(vm, vcpu, str(cpu))
                        if multi_dom:
                            offline_pin_and_check(vm2, vcpu, left_cpus)
                    else:
                        run_and_check_vcpupin(vm, vm_ref, vcpu, str(cpu),
                                              options)
                        if multi_dom:
                            run_and_check_vcpupin(vm2, "name", vcpu, left_cpus,
                                                  options)
            else:
                if cpu_list == "x-y":
                    cpus = "0-%s" % cpu_max
                elif cpu_list == "x,y":
                    cpus = ','.join(random.sample(cpus_list, 2))
                    logging.info(cpus)
                elif cpu_list == "x-y,^z":
                    cpus = "0-%s,^%s" % (cpu_max, cpu_max)
                elif cpu_list == "r":
                    cpus = "r"
                elif cpu_list == "-1":
                    cpus = "-1"
                elif cpu_list == "out_of_max":
                    cpus = str(cpu_max + 1)
                else:
                    raise error.TestNAError("Cpu_list=%s is not recognized."
                                            % cpu_list)
                if offline_pin:
                    offline_pin_and_check(vm, vcpu, cpus)
                else:
                    run_and_check_vcpupin(vm, vm_ref, vcpu, cpus, options)
    finally:
        # Recover xml of vm.
        vmxml_backup.sync()
        if vm2:
            vm2xml_backup.sync()
Example #4
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-agent if needed.
    2. Plug vcpu for the domain.
    3. Checking:
      3.1. Virsh vcpucount.
      3.2. Virsh vcpuinfo.
      3.3. Current vcpu number in domain xml.
      3.4. Virsh vcpupin and vcpupin in domain xml.
      3.5. The vcpu number in domain.
      3.6. Virsh cpu-stats.
    4. Repeat step 3 to check again.
    5. Control domain (save, managedsave, s3, s4, migrate, etc.).
    6. Repeat step 3 to check again.
    7. Recover domain (restore, wakeup, etc.).
    8. Repeat step 3 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 3 to check again.
    11. Repeat step 5 to control the domain (as BZ#1088216 is not fixed,
        skip save/managedsave/migrate related actions).
    12. Repeat step 3 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 3 to check again.
    15. Recover test environment.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = params.get("vcpu_max_num")
    vcpu_current_num = params.get("vcpu_current_num")
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = params.get("vcpu_plug_num")
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = params.get("vcpu_unplug_num")
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")

    # Init expect vcpu count values
    expect_vcpu_num = [
        vcpu_max_num, vcpu_max_num, vcpu_current_num, vcpu_current_num,
        vcpu_current_num
    ]
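    # Assumed slot meaning, following the check_vcpu_number convention:
    # [maximum --config, maximum --live, current --config, current --live,
    #  number of vcpus seen inside the guest]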
    if check_after_plug_fail:
        expect_vcpu_num_bk = list(expect_vcpu_num)
    # Init expect vcpu pin values
    expect_vcpupin = {}

    # Init cpu-list for vcpupin
    host_cpu_count = utils.count_cpus()
    if (int(host_cpu_count) < 2) and (pin_cpu_list != "x"):
        raise error.TestNAError("Need more host cpus for cpu-list=%s, but"
                                " only %s cpu(s) are available."
                                % (pin_cpu_list, host_cpu_count))

    cpus_list = utils.cpu_online_map()
    logging.info("Active cpus in host are %s", cpus_list)

    cpu_seq_str = ""
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    if pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value taken from the cfg
        pass

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, int(vcpu_max_num), int(vcpu_current_num))
        # Do not apply S3/S4 on power
        if 'power' not in cpu_util.get_cpu_arch():
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()

        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin)
        # plug vcpu
        if vcpu_plug:
            # Pin vcpu
            if pin_before_plug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            result = virsh.setvcpus(vm_name,
                                    vcpu_plug_num,
                                    setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True,
                                    debug=True)
            check_setvcpus_result(result, status_error)

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_plug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_plug_num
            else:
                expect_vcpu_num[3] = vcpu_plug_num
                expect_vcpu_num[4] = vcpu_plug_num
                if not status_error:
                    if not online_new_vcpu(vm, vcpu_plug_num):
                        raise error.TestFail("Fail to enable new added cpu")

            # Pin vcpu
            if pin_after_plug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if status_error and check_after_plug_fail:
                check_vcpu_number(vm, expect_vcpu_num_bk, {}, setvcpu_option)

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming a domain from S4 may take a long time (QEMU bug),
                # so wait up to 10 minutes and skip the remaining test steps
                # if the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception as e:
                    raise error.TestWarn("Skip remaining test steps as domain"
                                         " did not resume in 10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # the domain is suspended to disk (shut off) and restarted,
                # the current live vcpu number recovers to its original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_plug_num
                        expect_vcpu_num[4] = vcpu_plug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_plug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

        # Unplug vcpu
        if vcpu_unplug:
            # Pin vcpu
            if pin_before_unplug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                # As the vcpu will be unplugged later, set expect_vcpupin to empty
                expect_vcpupin = {}

            result = virsh.setvcpus(vm_name,
                                    vcpu_unplug_num,
                                    setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True,
                                    debug=True)

            try:
                check_setvcpus_result(result, status_error)
            except error.TestNAError:
                raise error.TestWarn("Skip unplug vcpu as it is not supported")

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_unplug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_unplug_num
            else:
                expect_vcpu_num[3] = vcpu_unplug_num
                expect_vcpu_num[4] = vcpu_unplug_num

            # Pin vcpu
            if pin_after_unplug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming a domain from S4 may take a long time (QEMU bug),
                # so wait up to 10 minutes and skip the remaining test steps
                # if the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception as e:
                    raise error.TestWarn("Skip remaining test steps as domain"
                                         " did not resume in 10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # the domain is suspended to disk (shut off) and restarted,
                # the current live vcpu number recovers to its original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_unplug_num
                        expect_vcpu_num[4] = vcpu_unplug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_unplug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)
Example #5
def run(test, params, env):
    """
    Test emulatorpin tuning

    1) Positive testing
       1.1) get the current emulatorpin parameters for a running/shutoff guest
       1.2) set the current emulatorpin parameters for a running/shutoff guest
    2) Negative testing
       2.1) get emulatorpin parameters for a running/shutoff guest
       2.2) set emulatorpin parameters for a running/shutoff guest
    """

    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cgconfig = params.get("cgconfig", "on")
    cpulist = params.get("emulatorpin_cpulist")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")

    # Backup original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    emulatorpin_placement = params.get("emulatorpin_placement", "")
    if emulatorpin_placement:
        vm.destroy()
        vmxml.placement = emulatorpin_placement
        vmxml.sync()
        vm.start()

    test_dicts = dict(params)
    test_dicts['vm'] = vm

    host_cpus = utils.count_cpus()
    test_dicts['host_cpus'] = host_cpus
    cpu_max = int(host_cpus) - 1

    cpu_list = None

    # Assemble cpu list for positive test
    if status_error == "no":
        if cpulist is None:
            pass
        elif cpulist == "x":
            cpulist = random.choice(utils.cpu_online_map())
        elif cpulist == "x-y":
            # By default, the emulator is pinned to all cpus, and the
            # element 'cputune/emulatorpin' may not exist in the VM's XML.
            # libvirt does nothing when pinning the emulator to the same
            # cpus, i.e. the VM's XML would still lack that element.
            # So for testing, avoid that value (0-$cpu_max).
            if cpu_max < 2:
                cpulist = "0-0"
            else:
                cpulist = "0-%s" % (cpu_max - 1)
        elif cpulist == "x,y":
            cpulist = ','.join(random.sample(utils.cpu_online_map(), 2))
        elif cpulist == "x-y,^z":
            cpulist = "0-%s,^%s" % (cpu_max, cpu_max)
        elif cpulist == "-1":
            cpulist = "-1"
        elif cpulist == "out_of_max":
            cpulist = str(cpu_max + 1)
        else:
            raise error.TestNAError("CPU-list=%s is not recognized." % cpulist)
    test_dicts['emulatorpin_cpulist'] = cpulist
    if cpulist:
        cpu_list = cpus_parser(cpulist)
        test_dicts['cpu_list'] = cpu_list
        logging.debug("CPU list is %s", cpu_list)

    cg = utils_cgroup.CgconfigService()

    if cgconfig == "off":
        if cg.cgconfig_is_running():
            cg.cgconfig_stop()

    # positive and negative testing #########
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)

        if status_error == "yes":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)
    finally:
        # Recover cgconfig and libvirtd service
        if not cg.cgconfig_is_running():
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
        # Recover vm.
        vmxml_backup.sync()
Example #6
def run(test, params, env):
    """
    Test emulatorpin tuning

    1) Positive testing
       1.1) get the current emulatorpin parameters for a running/shutoff guest
       1.2) set the current emulatorpin parameters for a running/shutoff guest
    2) Negative testing
       2.1) get emulatorpin parameters for a running/shutoff guest
       2.2) set emulatorpin parameters for a running/shutoff guest
    """

    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cgconfig = params.get("cgconfig", "on")
    cpulist = params.get("emulatorpin_cpulist")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")

    # Backup original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    emulatorpin_placement = params.get("emulatorpin_placement", "")
    if emulatorpin_placement:
        vm.destroy()
        vmxml.placement = emulatorpin_placement
        vmxml.sync()
        vm.start()

    test_dicts = dict(params)
    test_dicts['vm'] = vm

    host_cpus = utils.count_cpus()
    test_dicts['host_cpus'] = host_cpus
    cpu_max = int(host_cpus) - 1

    cpu_list = None

    # Assemble cpu list for positive test
    if status_error == "no":
        if cpulist is None:
            pass
        elif cpulist == "x":
            cpulist = random.choice(utils.cpu_online_map())
        elif cpulist == "x-y":
            cpulist = "0-%s" % cpu_max
        elif cpulist == "x,y":
            cpulist = ','.join(random.sample(utils.cpu_online_map(), 2))
        elif cpulist == "x-y,^z":
            cpulist = "0-%s,^%s" % (cpu_max, cpu_max)
        elif cpulist == "-1":
            cpulist = "-1"
        elif cpulist == "out_of_max":
            cpulist = str(cpu_max + 1)
        else:
            raise error.TestNAError("CPU-list=%s is not recognized."
                                    % cpulist)
    test_dicts['emulatorpin_cpulist'] = cpulist
    if cpulist:
        cpu_list = cpus_parser(cpulist)
        test_dicts['cpu_list'] = cpu_list
        logging.debug("CPU list is %s", cpu_list)

    cg = utils_cgroup.CgconfigService()

    if cgconfig == "off":
        if cg.cgconfig_is_running():
            cg.cgconfig_stop()

    # positive and negative testing #########
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)

        if status_error == "yes":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)
    finally:
        # Recover cgconfig and libvirtd service
        if not cg.cgconfig_is_running():
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
        # Recover vm.
        vmxml_backup.sync()
Example #7
def run(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    """
    def affinity_from_vcpuinfo(vm_name, vcpu):
        """
        This function returns list of the vcpu's affinity from
        virsh vcpuinfo output

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        """

        output = virsh.vcpuinfo(vm_name).stdout.rstrip()
        affinity = re.findall('CPU Affinity: +[-y]+', output)
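        # For an assumed 4-pcpu host, a vcpuinfo line such as
        #   CPU Affinity:   yy--
        # yields ['y', 'y', '-', '-']: the vcpu may run on cpus 0 and 1 only.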
        total_affinity = affinity[int(vcpu)].split()[-1].strip()
        actual_affinity = list(total_affinity)
        return actual_affinity

    def affinity_from_vcpupin(vm_name, vcpu):
        """
        This function returns list of vcpu's affinity from vcpupin output

        :param vm_name: VM Name
        :param vcpu: vcpu number for which the affinity is required
        :return : list of affinity to vcpus
        """
        total_cpu = process.run(
            "ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l",
            shell=True).stdout.strip()
        vcpus_affinity = {}
        output = virsh.vcpupin(vm_name).stdout
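        # Typical 'virsh vcpupin <domain>' output parsed below (illustrative):
        #   VCPU: CPU Affinity
        #   ----------------------------------
        #      0: 0-3
        #      1: 0-3
        # The slice [2:-2] drops the two header lines and the trailing lines.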
        for item in output.split('\n')[2:-2]:
            vcpus_affinity[item.split(':')[0].strip()] = item.split(
                ':')[1].strip()
        return utils_test.libvirt.cpus_string_to_affinity_list(
            vcpus_affinity[str(vcpu)], int(total_cpu))

    def check_vcpupin(vm_name, vcpu, cpu_list, pid, vcpu_pid):
        """
        This function checks the actual and the expected affinity of a given
        vcpu and raises an error if they do not match

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :param cpu_list: cpu list to which the vcpu is pinned
        :param pid: VM pid
        :param vcpu_pid: pid of the vcpu thread
        """

        total_cpu = process.run(
            "ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l",
            shell=True).stdout.strip()
        logging.debug("Debug: cpulist %s", cpu_list)
        expected_output = utils_test.libvirt.cpus_string_to_affinity_list(
            cpu_list, int(total_cpu))
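        # e.g. (assumed) cpu_list '0-2,^1' on a 4-cpu host maps to the
        # affinity list ['y', '-', 'y', '-'].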
        logging.debug("Expected affinity: %s", expected_output)

        # Check for affinity value from vcpuinfo output
        actual_output = affinity_from_vcpuinfo(vm_name, vcpu)
        logging.debug("Actual affinity in vcpuinfo output: %s", actual_output)
        if expected_output == actual_output:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            raise exceptions.TestFail(
                "Cpu pinning details not updated properly in"
                " virsh vcpuinfo command output")

        # Check for affinity value from vcpupin output
        actual_output_vcpupin = affinity_from_vcpupin(vm_name, vcpu)
        logging.debug("Actual affinity in vcpupin output: %s",
                      actual_output_vcpupin)
        if expected_output == actual_output_vcpupin:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            raise exceptions.TestFail(
                "Cpu pinning details not updated properly in"
                " virsh vcpupin command output")

        if pid is None:
            return
        # Get the actual cpu affinity value in the proc entry
        output = utils_test.libvirt.cpu_allowed_list_by_task(pid, vcpu_pid)
        actual_output_proc = utils_test.libvirt.cpus_string_to_affinity_list(
            output, int(total_cpu))
        logging.debug("Actual affinity in guest proc: %s", actual_output_proc)
        if expected_output == actual_output_proc:
            logging.info(
                "successfully pinned vcpu: %s --> cpu: %s"
                " in respective proc entry", vcpu, cpu_list)
        else:
            raise exceptions.TestFail("Cpu pinning details not "
                                      "updated properly in /proc/"
                                      "%s/task/%s/status" % (pid, vcpu_pid))

    def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
        """
        Run the vcpupin command and then check the result.
        """
        if vm_ref == "name":
            vm_ref = vm.name
        elif vm_ref == "uuid":
            vm_ref = vm.get_uuid()
        # Execute virsh vcpupin command.
        cmdResult = virsh.vcpupin(vm_ref, vcpu, cpu_list, options, debug=True)
        if cmdResult.exit_status:
            if not status_error:
                # Command fail and it is in positive case.
                raise exceptions.TestFail(cmdResult)
            else:
                # Command fail and it is in negative case.
                return
        else:
            if status_error:
                # Command success and it is in negative case.
                raise exceptions.TestFail(cmdResult)
            else:
                # Command success and it is in positive case.
                # "--config" will take effect after VM destroyed.
                pid = None
                vcpu_pid = None
                if options == "--config":
                    virsh.destroy(vm.name)
                else:
                    pid = vm.get_pid()
                    logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
                    vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    def offline_pin_and_check(vm, vcpu, cpu_list):
        """
        Edit domain xml to pin vcpu and check the result.
        """
        cputune = vm_xml.VMCPUTuneXML()
        cputune.vcpupins = [{'vcpu': str(vcpu), 'cpuset': cpu_list}]
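        # The resulting domain XML fragment looks like (illustrative):
        #   <cputune>
        #     <vcpupin vcpu='0' cpuset='0-1'/>
        #   </cputune>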
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        vmxml.cputune = cputune
        vmxml.sync()
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)
        pid = vm.get_pid()
        vcpu_pid = vm.get_vcpus_pid()[vcpu]
        check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    if not virsh.has_help_command('vcpucount'):
        raise exceptions.TestSkipError("This version of libvirt doesn't"
                                       " support this test")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    # Get the variables for vcpupin command.
    vm_ref = params.get("vcpupin_vm_ref", "name")
    options = params.get("vcpupin_options", "--current")
    cpu_list = params.get("vcpupin_cpu_list", "x")
    start_vm = ("yes" == params.get("start_vm", "yes"))
    vcpupin_initial = ("yes" == params.get("vcpupin_initial", "no"))

    # Get status of this case.
    status_error = ("yes" == params.get("status_error", "no"))

    # Edit domain xml to pin vcpus
    offline_pin = ("yes" == params.get("offline_pin", "no"))

    # Backup for recovery.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    if start_vm and vm.state() == "shut off":
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)

    # Get the guest vcpu count
    if offline_pin:
        vcpucount_option = "--config --active"
    else:
        vcpucount_option = "--live --active"
    guest_vcpu_count = virsh.vcpucount(vm_name,
                                       vcpucount_option).stdout.strip()

    # Find the alive cpus list
    cpus_list = utils.cpu_online_map()
    logging.info("Active cpus in host are %s", cpus_list)

    try:
        # Control multi domain vcpu affinity
        multi_dom = ("yes" == params.get("multi_dom_pin", "no"))
        vm2 = None
        # Before doing any vcpupin actions, let's check whether the
        # initial pinning state is fine
        if vcpupin_initial:
            pid = vm.get_pid()
            logging.debug("vcpus_pid: %s vcpu count: %s", vm.get_vcpus_pid(),
                          guest_vcpu_count)
            for vcpu in range(int(guest_vcpu_count)):
                vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, str(','.join(cpus_list)), pid,
                              vcpu_pid)
            return

        if multi_dom:
            vm_names = params.get("vms").split()
            if len(vm_names) > 1:
                vm2 = env.get_vm(vm_names[1])
            else:
                raise exceptions.TestError("Need more than one domains")
            if not vm2:
                raise exceptions.TestSkipError("No %s find" % vm_names[1])
            vm2.destroy()
            vm2xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm2.name)
            vm2xml_backup = vm2xml.copy()
            # Make sure vm2 has the same vcpu count as vm
            vm2xml.set_vm_vcpus(vm2.name, int(guest_vcpu_count),
                                int(guest_vcpu_count))
            if start_vm:
                vm2.start()

        # Run cases when guest is shutoff.
        if not offline_pin:
            if vm.is_dead() and not start_vm:
                run_and_check_vcpupin(vm, vm_ref, 0, 0, "")
                return
        # Get the host cpu count
        host_cpu_count = utils.count_cpus()
        cpu_max = int(host_cpu_count) - 1
        if (int(host_cpu_count) < 2) and (cpu_list != "x"):
            raise exceptions.TestSkipError(
                "Need more host cpus for cpu_list=%s, but only %s cpu(s)"
                " are available." % (cpu_list, host_cpu_count))

        # Run test case
        for vcpu in range(int(guest_vcpu_count)):
            if cpu_list == "x":
                for cpu in cpus_list:
                    left_cpus = "0-%s,^%s" % (cpu_max, cpu)
                    if offline_pin:
                        offline_pin_and_check(vm, vcpu, str(cpu))
                        if multi_dom:
                            offline_pin_and_check(vm2, vcpu, left_cpus)
                    else:
                        run_and_check_vcpupin(vm, vm_ref, vcpu, str(cpu),
                                              options)
                        if multi_dom:
                            run_and_check_vcpupin(vm2, "name", vcpu, left_cpus,
                                                  options)
            else:
                if cpu_list == "x-y":
                    cpus = "0-%s" % cpu_max
                elif cpu_list == "x,y":
                    cpus = ','.join(random.sample(cpus_list, 2))
                    logging.info(cpus)
                elif cpu_list == "x-y,^z":
                    cpus = "0-%s,^%s" % (cpu_max, cpu_max)
                elif cpu_list == "r":
                    cpus = "r"
                elif cpu_list == "-1":
                    cpus = "-1"
                elif cpu_list == "out_of_max":
                    cpus = str(cpu_max + 1)
                else:
                    raise exceptions.TestSkipError(
                        "Cpu_list=%s is not recognized." % cpu_list)
                if offline_pin:
                    offline_pin_and_check(vm, vcpu, cpus)
                else:
                    run_and_check_vcpupin(vm, vm_ref, vcpu, cpus, options)
    finally:
        # Recover xml of vm.
        vmxml_backup.sync()
        if vm2:
            vm2xml_backup.sync()
Example #8
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-agent if needed.
    2. Plug vcpu for the domain.
    3. Checking:
      3.1. Virsh vcpucount.
      3.2. Virsh vcpuinfo.
      3.3. Current vcpu number in domain xml.
      3.4. Virsh vcpupin and vcpupin in domain xml.
      3.5. The vcpu number in domain.
      3.6. Virsh cpu-stats.
    4. Repeat step 3 to check again.
    5. Control domain (save, managedsave, s3, s4, migrate, etc.).
    6. Repeat step 3 to check again.
    7. Recover domain (restore, wakeup, etc.).
    8. Repeat step 3 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 3 to check again.
    11. Repeat step 5 to control the domain (as BZ#1088216 is not fixed,
        skip save/managedsave/migrate related actions).
    12. Repeat step 3 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 3 to check again.
    15. Recover test environment.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = params.get("vcpu_max_num")
    vcpu_current_num = params.get("vcpu_current_num")
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = params.get("vcpu_plug_num")
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = params.get("vcpu_unplug_num")
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")

    # Init expect vcpu count values
    expect_vcpu_num = [vcpu_max_num, vcpu_max_num, vcpu_current_num,
                       vcpu_current_num, vcpu_current_num]
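    # Assumed slot meaning, following the check_vcpu_number convention:
    # [maximum --config, maximum --live, current --config, current --live,
    #  number of vcpus seen inside the guest]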
    if check_after_plug_fail:
        expect_vcpu_num_bk = list(expect_vcpu_num)
    # Init expect vcpu pin values
    expect_vcpupin = {}

    # Init cpu-list for vcpupin
    host_cpu_count = utils.count_cpus()
    if (int(host_cpu_count) < 2) and (pin_cpu_list != "x"):
        raise error.TestNAError("Need more host cpus for cpu-list=%s, but"
                                " only %s cpu(s) are available."
                                % (pin_cpu_list, host_cpu_count))

    cpus_list = utils.cpu_online_map()
    logging.info("Active cpus in host are %s", cpus_list)

    cpu_seq_str = ""
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    if pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value taken from the cfg
        pass

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, int(vcpu_max_num), int(vcpu_current_num))

        # Do not apply S3/S4 on power
        if 'power' not in cpu_util.get_cpu_arch():
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()

        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin)
        # plug vcpu
        if vcpu_plug:
            # Pin vcpu
            if pin_before_plug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True, debug=True)
            check_setvcpus_result(result, status_error)

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_plug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_plug_num
            else:
                expect_vcpu_num[3] = vcpu_plug_num
                expect_vcpu_num[4] = vcpu_plug_num
                if not status_error:
                    if not online_new_vcpu(vm, vcpu_plug_num):
                        raise error.TestFail("Fail to enable new added cpu")

            # Pin vcpu
            if pin_after_plug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if status_error and check_after_plug_fail:
                check_vcpu_number(vm, expect_vcpu_num_bk, {}, setvcpu_option)

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming a domain from S4 may take a long time (QEMU bug),
                # so wait up to 10 minutes and skip the remaining test steps
                # if the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception as e:
                    raise error.TestWarn("Skip remaining test steps as domain"
                                         " did not resume in 10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # the domain is suspended to disk (shut off) and restarted,
                # the current live vcpu number recovers to its original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_plug_num
                        expect_vcpu_num[4] = vcpu_plug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_plug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

        # Unplug vcpu
        # Since QEMU 2.2.0, all current vcpus are non-hotpluggable by default
        # when the VM starts, and vcpu 0 (id=1) must always be present and
        # non-hotpluggable, so these vcpus cannot be hot-unplugged directly.
        # We can either hotplug more vcpus before hot-unplugging, or set the
        # 'hotpluggable' attribute to 'yes' for all vcpus except vcpu 0, so
        # that libvirt can find enough hotpluggable vcpus to reach the
        # desired target vcpu count. As the simpler preparation step, we
        # choose to hotplug more vcpus here.
        if vcpu_unplug:
            if setvcpu_option == "--live":
                logging.info("Hotplug vcpu to the maximum count to make sure"
                             " all these new plugged vcpus are hotunpluggable")
                result = virsh.setvcpus(vm_name, vcpu_max_num, '--live',
                                        debug=True)
                libvirt.check_exit_status(result)
            # Pin vcpu
            if pin_before_unplug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # As the vcpu will be unplugged later, set expect_vcpupin to empty
                expect_vcpupin = {}

            result = virsh.setvcpus(vm_name, vcpu_unplug_num, setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True, debug=True)

            try:
                check_setvcpus_result(result, status_error)
            except error.TestNAError:
                raise error.TestWarn("Skip unplug vcpu as it is not supported")

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_unplug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_unplug_num
            else:
                expect_vcpu_num[3] = vcpu_unplug_num
                expect_vcpu_num[4] = vcpu_unplug_num

            # Pin vcpu
            if pin_after_unplug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming a domain from S4 may take a long time (QEMU bug),
                # so wait up to 10 minutes and skip the remaining test steps
                # if the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception as e:
                    raise error.TestWarn("Skip remaining test steps as domain"
                                         " did not resume in 10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # the domain is suspended to disk (shut off) and restarted,
                # the current live vcpu number recovers to its original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_unplug_num
                        expect_vcpu_num[4] = vcpu_unplug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_unplug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)
Example #9
def run(test, params, env):
    """
    Test vcpupin while numad is running
    """
    vcpu_placement = params.get("vcpu_placement")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value
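    # e.g. (illustrative) memory_mode='strict' and memory_nodeset='0,1'
    # yield numa_memory = {'mode': 'strict', 'nodeset': '0,1'}.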

    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd.start()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        if len(node_list) < 2:
            test.cancel('Online NUMA nodes less than 2')
        node_a, node_b = min(node_list), max(node_list)
        numa_memory.update({'nodeset': '%d,%d' % (node_a, node_b)})
        # Start numad
        try:
            utils.run("service numad start")
        except error.CmdError, e:
            # Bug 1218149 was closed as not a bug; work around it as
            # described in bug comment 12
            logging.debug("start numad failed with %s", e)
            logging.debug("remove message queue of id 0 and try again")
            utils.run("ipcrm msg 0", ignore_status=True)
            utils.run("service numad start")

        # Start vm and do vcpupin
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vmxml.placement = vcpu_placement
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        vm.start()
        vm.wait_for_login()

        # Test vcpupin against the list of online host cpus
        cpus_list = utils.cpu_online_map()
        logging.info("online host cpus are %s", cpus_list)
        for cpu in cpus_list:
            ret = virsh.vcpupin(vm_name,
                                0,
                                cpu,
                                debug=True,
                                ignore_status=True)
            if ret.exit_status:
                logging.error("related bug url: %s", bug_url)
                raise error.TestFail("vcpupin failed: %s" % ret.stderr)
            virsh.vcpuinfo(vm_name, debug=True)
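            # For reference: virsh.vcpupin() wraps the command line
            #   virsh vcpupin <domain> <vcpu> <cpulist>
            # so each iteration pins guest vcpu 0 to one online host cpu.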
Example #10
def run(test, params, env):
    """
    Test the command virsh nodecpustats

    (1) Call the virsh nodecpustats command for each host cpu
        separately
    (2) Get the output
    (3) Check it against the /proc/stat output (o) for the respective cpu
        user: o[0] + o[1]
        system: o[2] + o[5] + o[6]
        idle: o[3]
        iowait: o[4]
    (4) Call the virsh nodecpustats command with an unexpected option
    (5) Call the virsh nodecpustats command with libvirtd service stop
    """

    def virsh_check_nodecpustats_percpu(actual_stats):
        """
        Check the actual nodecpustats output value:
        total time <= system uptime
        """

        # Normalise from nanoseconds to seconds
        total = (actual_stats['system'] + actual_stats['user'] +
                 actual_stats['idle'] + actual_stats['iowait']) / float(10 ** 9)
        uptime = float(utils.get_uptime())
        if total > uptime:
            raise error.TestFail("Command 'virsh nodecpustats' did not succeed"
                                 " as total time: %f is more"
                                 " than uptime: %f" % (total, uptime))
        return True

    def virsh_check_nodecpustats(actual_stats, cpu_count):
        """
        Check the actual nodecpustats output value (averaged over cpus):
        total time <= system uptime
        """

        # Normalise from nanoseconds to seconds and average over cpus
        total = (actual_stats['system'] + actual_stats['user'] +
                 actual_stats['idle'] +
                 actual_stats['iowait']) / float(10 ** 9) / cpu_count
        uptime = float(utils.get_uptime())
        if total > uptime:
            raise error.TestFail("Command 'virsh nodecpustats' did not succeed"
                                 " as total time: %f is more"
                                 " than uptime: %f" % (total, uptime))
        return True
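
    # Worked example with hypothetical numbers: on a 4-cpu host whose
    # aggregate stats sum to 4.0e12 ns, total = 4.0e12 / 1e9 / 4 = 1000
    # seconds, which must not exceed the host uptime for the check to pass.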

    def virsh_check_nodecpustats_percentage(actual_per):
        """
        Check that the actual nodecpustats percentages add up to 100%
        """

        total = int(round(actual_per['user'] + actual_per['system'] +
                          actual_per['idle'] + actual_per['iowait']))

        if total != 100:
            raise error.TestFail("Command 'virsh nodecpustats' did not succeed"
                                 " as the total percentage value: %d"
                                 " does not equal 100" % total)

    def parse_output(output):
        """
        Parse the given virsh command output into a dictionary
        :param output: virsh command output

        :return: dict of user,system,idle,iowait times
        """

        # From the beginning of a line, group 1 is one or more word-characters,
        # followed by zero or more whitespace characters and a ':',
        # then one or more whitespace characters,
        # followed by group 2, which is one or more digit characters,
        # e.g. as below
        # user:                  6163690000000
        #
        regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+)")
        actual = {}

        for line in output.stdout.split('\n'):
            match_obj = regex_obj.search(line)
            # Skip lines that do not match (e.g. blank lines)
            if match_obj is not None:
                name = match_obj.group(1)
                value = match_obj.group(2)
                actual[name] = int(value)
        return actual
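
    # Usage sketch (values are hypothetical): for virsh output lines such as
    #   user:                  6163690000000
    #   idle:                  1095805000000
    # parse_output() returns {'user': 6163690000000, 'idle': 1095805000000}.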

    def parse_percentage_output(output):
        """
        Parse the given virsh command output into a dictionary
        :param output: virsh command output

        :return: dict of user,system,idle,iowait percentages
        """

        # From the beginning of a line, group 1 is one or more word-characters,
        # followed by zero or more whitespace characters and a ':',
        # then one or more whitespace characters,
        # followed by group 2, which is a decimal number,
        # e.g. as below
        # user:             1.5%
        #
        regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+\.\d+)")
        actual_percentage = {}

        for line in output.stdout.split('\n'):
            match_obj = regex_obj.search(line)
            # Skip lines that do not match (e.g. blank lines)
            if match_obj is not None:
                name = match_obj.group(1)
                value = match_obj.group(2)
                actual_percentage[name] = float(value)
        return actual_percentage

    # Initialize the variables
    itr = int(params.get("inner_test_iterations"))
    option = params.get("virsh_cpunodestats_options")
    invalid_cpunum = params.get("invalid_cpunum")
    status_error = params.get("status_error")
    libvirtd = params.get("libvirtd", "on")

    # Prepare libvirtd service
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Get the host cpu list
    host_cpus_list = utils.cpu_online_map()

    # Run the test case for 5 iterations by default; this can be changed in
    # the subtests.cfg file
    for i in range(itr):

        if status_error == "yes":
            if invalid_cpunum == "yes":
                option = "--cpu %s" % (len(host_cpus_list) + 1)
            output = virsh.nodecpustats(ignore_status=True, option=option)
            status = output.exit_status

            if status == 0:
                if libvirtd == "off":
                    utils_libvirtd.libvirtd_start()
                    raise error.TestFail("Command 'virsh nodecpustats' "
                                         "succeeded with libvirtd service "
                                         "stopped, incorrect")
                else:
                    raise error.TestFail("Command 'virsh nodecpustats %s' "
                                         "succeeded (incorrect command)" % option)

        elif status_error == "no":
            # Run the testcase for each cpu to get the cpu stats
            for cpu in host_cpus_list:
                option = "--cpu %s" % cpu
                output = virsh.nodecpustats(ignore_status=True, option=option)
                status = output.exit_status

                if status == 0:
                    actual_value = parse_output(output)
                    virsh_check_nodecpustats_percpu(actual_value)
                else:
                    raise error.TestFail("Command 'virsh nodecpustats %s'"
                                         "not succeeded" % option)

            # Run the test case for each cpu to get the cpu stats in percentage
            for cpu in host_cpus_list:
                option = "--cpu %s --percent" % cpu
                output = virsh.nodecpustats(ignore_status=True, option=option)
                status = output.exit_status

                if status == 0:
                    actual_value = parse_percentage_output(output)
                    virsh_check_nodecpustats_percentage(actual_value)
                else:
                    raise error.TestFail("Command 'virsh nodecpustats %s'"
                                         " not succeeded" % option)

            option = ''
            # Run the test case for all cpus together to get the aggregate stats
            output = virsh.nodecpustats(ignore_status=True, option=option)
            status = output.exit_status

            if status == 0:
                actual_value = parse_output(output)
                virsh_check_nodecpustats(actual_value, len(host_cpus_list))
            else:
                raise error.TestFail("Command 'virsh nodecpustats %s'"
                                     " did not succeed" % option)

            # Run the test case for all cpus together to get the stats in
            # percentage
            option = "--percent"
            output = virsh.nodecpustats(ignore_status=True, option=option)
            status = output.exit_status

            if status == 0:
                actual_value = parse_percentage_output(output)
                virsh_check_nodecpustats_percentage(actual_value)
            else:
                raise error.TestFail("Command 'virsh nodecpustats %s'"
                                     " did not succeed" % option)

    # Restart the libvirtd service if it was stopped
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    test_dicts = dict(params)
    test_dicts['vm'] = vm

    host_cpus = utils.count_cpus()
    test_dicts['host_cpus'] = host_cpus
    cpu_max = int(host_cpus) - 1

    cpulist = None

    # Assemble cpu list for positive test
    if status_error == "no":
        if cpulist is None:
            pass
        elif cpulist == "x":
            cpulist = random.choice(utils.cpu_online_map())
        elif cpulist == "x-y":
            # By default, the emulator is pinned to all cpus, and the element
            # 'cputune/emulatorpin' may not exist in the VM's XML.
            # Libvirt will do nothing if the emulator is pinned to the cpus
            # it already uses, which leaves the VM's XML unchanged.
            # So for testing, we should avoid that value (0-$cpu_max).
            if cpu_max < 2:
                cpulist = "0-0"
            else:
                cpulist = "0-%s" % (cpu_max - 1)
        elif cpulist == "x,y":
            cpulist = ','.join(random.sample(utils.cpu_online_map(), 2))
        elif cpulist == "x-y,^z":
            cpulist = "0-%s,^%s" % (cpu_max, cpu_max)
        elif cpulist == "-1":
Example #12
def run(test, params, env):
    """
    Test vcpupin while numad is running
    """
    vcpu_placement = params.get("vcpu_placement")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd.start()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                for i in used_node:
                    if i > max(node_list):
                        raise error.TestNAError("nodeset %s out of range" %
                                                numa_memory['nodeset'])
        # Start numad
        try:
            utils.run("service numad start")
        except error.CmdError, e:
            # Bug 1218149 was closed as not a bug; work around it as
            # described in bug comment 12
            logging.debug("start numad failed with %s", e)
            logging.debug("remove message queue of id 0 and try again")
            utils.run("ipcrm msg 0", ignore_status=True)
            utils.run("service numad start")

        # Start vm and do vcpupin
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vmxml.placement = vcpu_placement
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        vm.start()
        vm.wait_for_login()

        # Test vcpupin against the list of online host cpus
        cpus_list = utils.cpu_online_map()
        logging.info("online host cpus are %s", cpus_list)
        for cpu in cpus_list:
            ret = virsh.vcpupin(vm_name, 0, cpu, debug=True,
                                ignore_status=True)
            if ret.exit_status:
                logging.error("related bug url: %s", bug_url)
                raise error.TestFail("vcpupin failed: %s" % ret.stderr)
            virsh.vcpuinfo(vm_name, debug=True)