Example No. 1
    def host_stress_event(self, event):
        """
        Host Stress events

        :param event: event name
        """
        for itr in range(self.host_iterations):
            if "cpu_freq_governor" in event:
                cpu.set_cpufreq_governor()
                logging.debug("Current governor: %s", cpu.get_cpufreq_governor())
                time.sleep(self.event_sleep_time)
            elif "cpu_idle" in event:
                idlestate = cpu.get_cpuidle_state()
                cpu.set_cpuidle_state()
                time.sleep(self.event_sleep_time)
                cpu.set_cpuidle_state(setstate=idlestate)
                time.sleep(self.event_sleep_time)
            elif "cpuoffline" in event:
                processor = self.host_cpu_list[random.randint(0, cpu.online_cpus_count()-1)]
                cpu.offline(processor)
                time.sleep(self.event_sleep_time)
                cpu.online(processor)
            else:
                raise NotImplementedError
            time.sleep(self.itr_sleep_time)
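The `cpu.offline()` / `cpu.online()` helpers used throughout these examples are, on Linux, thin wrappers over the per-CPU sysfs hotplug knob. Below is a minimal sketch of that underlying interface; `toggle_cpu` is a hypothetical name used only for illustration, and it assumes root privileges and a hotplug-capable CPU (cpu0 frequently exposes no `online` file).

import os

SYSFS_CPU = "/sys/devices/system/cpu"


def toggle_cpu(cpu_num, online=True):
    """Write 1/0 to /sys/devices/system/cpu/cpu<N>/online; return True on success."""
    knob = os.path.join(SYSFS_CPU, "cpu%d" % cpu_num, "online")
    if not os.path.exists(knob):
        # e.g. cpu0 is often not hot-removable and has no "online" file
        return False
    try:
        with open(knob, "w") as sysfs_file:
            sysfs_file.write("1" if online else "0")
        with open(knob) as sysfs_file:
            return sysfs_file.read().strip() == ("1" if online else "0")
    except OSError:
        return False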
Example No. 2
    def cpu_serial_off_on(self):
        """
        Offline all the cpus serially and online again
        offline 0 -> 99
        online 99 -> 0
        offline 99 -> 0
        online 0 -> 99
        """
        for _ in range(self.iteration):
            self.log.info("OFF-ON Serial Test %s", totalcpus)
            for cpus in range(1, totalcpus):
                self.log.info("cpu%s going offline" % cpus)
                cpu.offline(cpus)
            self.log.info("Online CPU's in reverse order %s", totalcpus)
            for cpus in range(totalcpus, -1, -1):
                self.log.info("cpu%s going online" % cpus)
                cpu.online(cpus)
            self.log.info("Offline CPU's in reverse order %s", totalcpus)
            for cpus in range(totalcpus, -1, -2):
                self.log.info("cpu%s going offline" % cpus)
                cpu.offline(cpus)
            self.log.info("Online CPU's in serial")
            for cpus in range(0, totalcpus):
                self.log.info("cpu%s going online" % cpus)
                cpu.online(cpus)
Example No. 3
def run(test, params, env):
    """
    Test the command virsh nodecpumap

    (1) Call virsh nodecpumap
    (2) Call virsh nodecpumap with pretty option
    (3) Call virsh nodecpumap with an unexpected option
    """

    option = params.get("virsh_node_options")
    status_error = params.get("status_error")
    cpu_off_on_test = params.get("cpu_off_on", "no") == "yes"
    online_cpus = cpu.cpu_online_list()
    test_cpu = random.choice(online_cpus)

    if cpu_off_on_test:
        # Turn off CPU
        cpu.offline(test_cpu)

    result = virsh.nodecpumap(option, ignore_status=True, debug=True)
    check_result(result, option, status_error, test)

    if cpu_off_on_test:
        # Turn on CPU and check again
        cpu.online(test_cpu)
        result = virsh.nodecpumap(option, ignore_status=True, debug=True)
        check_result(result, option, status_error, test)
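`check_result()` is defined elsewhere in the original test and is not shown here. The sketch below is only an illustration of what such a check could look like, assuming the usual `virsh nodecpumap` output fields ("CPUs present", "CPUs online", "CPU map") and an avocado CmdResult with `exit_status` / `stdout_text`.

def check_result(result, option, status_error, test):
    # Illustrative stand-in only -- the real helper lives in the original test.
    if status_error == "yes":
        if result.exit_status == 0:
            test.fail("Option '%s' should have failed but succeeded" % option)
        return
    if result.exit_status != 0:
        test.fail("virsh nodecpumap %s failed: %s" % (option, result.stderr_text))
    # assumed typical output fields of `virsh nodecpumap`
    for field in ("CPUs present", "CPUs online", "CPU map"):
        if field not in result.stdout_text:
            test.fail("'%s' missing from nodecpumap output" % field)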
Example No. 4
    def pinned_cpu_stress(self):
        """
        Set process affinity and do cpu off on
        @BUG : https://lkml.org/lkml/2017/5/30/122
        """
        pids = []  # pids of the busy-loop helpers spawned below
        nodes = []
        self.log.info("\nCreate %s pids and set proc affinity", totalcpus)
        for proc in range(0, totalcpus):
            pid = process.SubProcess("while :; do :; done &",
                                     shell=True).start()
            pids.append(pid)
            process.run("taskset -pc %s %s" % (proc, pid),
                        ignore_status=True,
                        shell=True)

        self.log.info("\noffline cpus and see the affinity change")
        count = 0
        for pid in pids:
            cpu.offline(count)
            process.run("taskset -pc %s" % pid, ignore_status=True, shell=True)
            count = count + 1

        self.__online_cpus(totalcpus)

        self.log.info("\nShift affinity for the same process and toggle")
        for proc in range(totalcpus):
            process.run("taskset -pc $((%s<<1)) $$" % proc,
                        ignore_status=True,
                        shell=True)
            cpu.offline(proc)

        self.__online_cpus(totalcpus)

        self.log.info("\nSet all process affine to single NUMA node")
        nodes = process.system_output("numactl --hardware | grep cpus:",
                                      shell=True)
        nodes = nodes.split('\n')
        for node in nodes:
            cores = node.split(': ')[-1].replace(" ", ",")
            if cores:
                for pid in pids:
                    process.run("taskset -pc %s %s" % (cores, pid),
                                ignore_status=True,
                                shell=True)

        self.log.info(
            "\ntoggle random cpu, while shifting affinity of same pid")
        for i in range(self.iteration):
            core = randint(0, totalcpus)
            process.run("taskset -pc $((%s<<1)) $$" % core,
                        ignore_status=True,
                        shell=True)
            self.__cpu_toggle(core)

        self.__kill_process(pids)
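The `taskset -pc` shell-outs above have a stdlib equivalent. A minimal Linux-only sketch (not part of the original test) using `os.sched_setaffinity()` / `os.sched_getaffinity()`, where pid 0 means the calling process:

import os


def pin_to_cpus(pid, cpus):
    """Restrict `pid` to the given CPUs and return the mask actually in effect."""
    os.sched_setaffinity(pid, set(cpus))
    return os.sched_getaffinity(pid)


# e.g. pin the current process to CPU 0:
#     pin_to_cpus(0, [0])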
Example No. 5
    def cpu_toggle_one_by_one(self):
        """
        Wait for the given timeout between Off/On single cpu.
        loop over all cpus for given iteration.
        """
        for _ in range(self.iteration):
            for cpus in range(totalcpus):
                self.log.info("cpu%s going offline" % cpus)
                cpu.offline(cpus)
                self.log.info("cpu%s going online" % cpus)
                cpu.online(cpus)
Example No. 6
    def single_cpu_toggle(self):
        """
        Offline-online single cpu for given iteration
        and loop over all cpus.
        @BUG: https://lkml.org/lkml/2017/6/12/212
        """
        for cpus in range(1, totalcpus):
            for _ in range(self.iteration):
                self.log.info("cpu%s going offline" % cpus)
                cpu.offline(cpus)
                self.log.info("cpu%s going online" % cpus)
                cpu.online(cpus)
Example No. 7
    def test_disable_enable_cpu():
        """
        Test disable a host cpu and check nodeinfo result
        """
        online_list = cputils.online_list()
        # Choose the last online host cpu to offline
        cputils.offline(online_list[-1])

        cmd_result = virsh.nodeinfo(ignore_status=True)
        output_check(cmd_result.stdout_text.strip())

        # Make the last host cpu online again
        cputils.online(online_list[-1])
Example No. 8
    def offline_cpu(self, cpu_num):
        """
        Offline the particular cpu
        """
        if cpu.offline(cpu_num):
            self.nfail += 1
            self.log.info("Failed to offline the cpu %s" % cpu_num)
        else:
            self.log.info("Offline the cpu : %s" % cpu_num)
Example No. 9
    def test_disable_enable_cpu():
        """
        Test disable a host cpu and check nodeinfo result

        :return: test.fail if CPU(s) number is not expected
        """
        ret_before_disable = virsh.nodeinfo(ignore_status=True, debug=True)
        cpus_nodeinfo_before = _check_nodeinfo(
            ret_before_disable.stdout_text.strip(), "CPU(s)", 2)

        online_list = cputils.online_list()
        # Choose the last online host cpu to offline
        cputils.offline(online_list[-1])

        ret_after_disable = virsh.nodeinfo(ignore_status=True, debug=True)
        cpus_nodeinfo_after = _check_nodeinfo(
            ret_after_disable.stdout_text.strip(), "CPU(s)", 2)
        if int(cpus_nodeinfo_before) != int(cpus_nodeinfo_after) + 1:
            test.fail("CPU(s) should be '%d' after 1 cpu is disabled, "
                      "but found '%s'" %
                      (int(cpus_nodeinfo_before) - 1, cpus_nodeinfo_after))
        # Make the last host cpu online again
        cputils.online(online_list[-1])
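`_check_nodeinfo()` is an elided helper from the original test. A hypothetical version is sketched below purely to show the idea: pull one field out of `virsh nodeinfo` output (for instance "CPU(s):              8"), where the last argument is the 1-based whitespace-separated column.

def _check_nodeinfo(nodeinfo_output, field, position):
    # Hypothetical stand-in; the original helper may differ.
    for line in nodeinfo_output.splitlines():
        if line.startswith(field):
            # "CPU(s):              8" with position=2 returns "8"
            return line.split()[position - 1]
    return None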
Example No. 10
    def test_cpumask_cpu_off(self):
        # Get the online cpu list
        online_cpus = cpu.online_list()
        self.log.info("Online CPU list: %s" % online_cpus)
        pmu1 = list(self.all_events.keys())[0]
        disable_cpu = self._get_cpumask(pmu1)
        # Disable cpu with one PMU cpumask
        if cpu.offline(disable_cpu):
            self.fail("Can't offline cpumask cpu %s" % disable_cpu)
        current_cpu = self._get_cpumask(pmu1)
        self.log.info("Current CPU: %s" % current_cpu)
        self._check_cpumask()
        # After confirming cpu got disabled, enable back
        if current_cpu in online_cpus and disable_cpu != current_cpu:
            if cpu.online(disable_cpu):
                self.fail("Can't online cpu %s" % disable_cpu)
Example No. 11
def test_disable_enable_cpu(test, host_cpus_list, params):
    """
    Test the nodecpustats command when one cpu is disabled and then enabled again

    :param test: test object
    :param host_cpus_list: list, host cpu list
    :param params: dict, test parameters
    :raises: test.error if cpu offline or online fails
    """
    logging.debug("Offline host cpu %s" % host_cpus_list[-1])
    if cpuutil.offline(host_cpus_list[-1]):
        test.error("Failed to offline host cpu %s" % host_cpus_list[-1])
    option = "--cpu %s" % host_cpus_list[-1]
    status, output = run_nodecpustats(option)
    err_msg = params.get("err_msg", '')
    libvirt.check_result(output, expected_fails=[err_msg])

    logging.debug("Online host cpu %s" % host_cpus_list[-1])
    if cpuutil.online(host_cpus_list[-1]):
        test.error("Failed to online host cpu %s" % host_cpus_list[-1])
    subtest_cpu_percentage_option(test,
                                  host_cpus_list[-1],
                                  with_cpu_option=False)
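`run_nodecpustats()` and `subtest_cpu_percentage_option()` come from the original module and are not shown here. A plausible minimal stand-in for the former, shelling out to the virsh CLI (assumes virsh is installed and on PATH), could look like:

from avocado.utils import process


def run_nodecpustats(option=""):
    # Plausible stand-in only; the original wrapper may add more handling.
    result = process.run("virsh nodecpustats %s" % option,
                         ignore_status=True, shell=True)
    return result.exit_status, result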
Example No. 12
    def cpus_toggle(self):
        """
        Toggle CPUS online and offline
        """
        totalcpus = multiprocessing.cpu_count()
        full_count = int(totalcpus) - 1
        # integer division so the counts stay usable as range() bounds
        half_count = int(totalcpus) // 2 - 1
        shalf_count = int(totalcpus) // 2
        fcpu = "0 - %s" % half_count
        scpu = "%s - %s" % (shalf_count, full_count)

        self.log.info("Online all cpus %s", totalcpus)
        for cpus in range(0, full_count):
            cpu.online(cpus)
        time.sleep(10)

        self.log.info("Offline all cpus 0 - %s\n", full_count)
        for cpus in range(0, full_count):
            cpu.offline(cpus)
        time.sleep(10)

        self.log.info("Online all cpus 0 - %s\n", full_count)
        for cpus in range(0, full_count):
            cpu.online(cpus)

        self.log.info("Offline and online first half cpus %s\n", fcpu)
        for cpus in range(0, half_count):
            cpu.offline(cpus)
            time.sleep(10)
            cpu.online(cpus)

        self.log.info("Offline and online second half cpus %s\n", scpu)
        for cpus in range(shalf_count, full_count):
            cpu.offline(cpus)
            time.sleep(10)
            cpu.online(cpus)
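After a batch of offline/online calls like the ones above, the kernel's summary files are an easy cross-check: /sys/devices/system/cpu/online (and the sibling offline/present files) hold range lists such as "0-3,6". A small parsing sketch, not part of the original test:

def read_cpu_ranges(path="/sys/devices/system/cpu/online"):
    """Expand a kernel CPU range-list file (e.g. "0-3,6") into a sorted list."""
    with open(path) as range_file:
        text = range_file.read().strip()
    cpus = set()
    for chunk in text.split(","):
        if "-" in chunk:
            start, end = chunk.split("-")
            cpus.update(range(int(start), int(end) + 1))
        elif chunk:
            cpus.add(int(chunk))
    return sorted(cpus)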
Example No. 13
def run(test, params, env):
    """
    Test vcpu affinity feature as follows:
    positive test:
        1. use vcpu cpuset in xml to define vcpu affinity
        2. use cputune cpuset in xml to define vcpu affinity
        3. use offline-to-online host cpu as cpuset to run virsh vcpupin
        4. set vcpu placement in xml to auto and check xml result
        5. set vcpu cpuset in xml without placement defined and check xml result
    negative test:
        1. use outrange cpuset as vcpu cpuset in xml to define vcpu affinity
        2. use outrange cpuset as cputune cpuset in xml to define vcpu affinity
        3. use invalid cpuset as cputune cpuset in xml to define vcpu affinity
        4. use duplicate vcpu in xml to define vcpu affinity
        5. use offline host cpu as cputune cpuset to run virsh vcpupin
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cpuset_mask = params.get("cpuset_mask", "")
    vcpu = params.get("vcpu", "0")
    maxvcpu = params.get("maxvcpu", "8")
    current_vcpu = params.get("current_vcpu", "3")
    check = params.get("check", "")
    config_xml = params.get("config_xml", "")

    status_error = "yes" == params.get("status_error", "no")
    define_fail = "yes" == params.get("define_fail", "no")
    start_fail = "yes" == params.get("start_fail", "no")
    runtime_fail = "yes" == params.get("runtime_fail", "no")
    vm_down = "yes" == params.get("vm_down", "no")

    vcpu_cpuset = params.get("vcpu_cpuset", "")
    cputune_cpuset = params.get("cputune_cpuset", "")
    vcpu_placement = params.get("vcpu_placement", "static")
    err_msg = params.get("err_msg", "")
    start_timeout = int(params.get("start_timeout", "60"))
    offline_hostcpus = params.get("offline_hostcpus", "")
    machine_cpuset_path = params.get("machine_cpuset_path", "")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)
        # check the expected vcpu affinity with the one got from running vm
        if not utils_hotplug.check_affinity(vm, affinity):
            test.fail("vcpu affinity check fail")

    try:
        hostcpu_num = int(cpu.total_cpus_count())

        # online all host cpus
        for x in range(hostcpu_num):
            if cpu.online(x):
                test.fail("fail to online cpu{}".format(x))

        # use vcpu cpuset or/and cputune cpuset to define xml
        del vmxml.cputune
        del vmxml.vcpus
        del vmxml.placement
        vmxml.vcpu = int(maxvcpu)
        vmxml.current_vcpu = current_vcpu

        # config vcpu cpuset for cpuset range test
        num = 1 if not status_error else 0
        cpuset_new = "0-{},^{}".format(hostcpu_num - num, cpuset_mask)
        if (config_xml == "vcpu" and check.endswith("range_cpuset")):
            vcpu_cpuset = cpuset_new
        vmxml.cpuset = vcpu_cpuset

        if vcpu_placement:
            vmxml.placement = vcpu_placement

        if config_xml == "cputune":
            cputune = vm_xml.VMCPUTuneXML()
            if check.endswith("range_cpuset"):
                cputune_cpuset = cpuset_new
            if check.endswith("duplicate_vcpu"):
                cputune.vcpupins = [{
                    'vcpu': vcpu,
                    'cpuset': "2"
                }, {
                    'vcpu': vcpu,
                    'cpuset': "3"
                }]
            else:
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': cputune_cpuset}]
            vmxml.cputune = cputune

        logging.debug(vmxml)
        if status_error and define_fail:
            result_to_check = virsh.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

        # start the vm
        if status_error and start_fail:
            result_to_check = virsh.start(vm_name, debug=True)

        if (not status_error) or runtime_fail:
            result_to_check = virsh.start(vm_name,
                                          debug=True,
                                          ignore_status=False)
            vm.wait_for_login(timeout=start_timeout).close()

            # test vcpu cpuset in offline/online  host cpu scenario
            if check.endswith("offline_hostcpu"):
                if vm_down:
                    vm.shutdown()
                for x in offline_hostcpus.split(','):
                    if cpu.offline(x):
                        test.fail("fail to offline cpu{}".format(x))
                    logging.debug("offline host cpu {}".format(x))
                if vm_down:
                    vm.start()
                    vm.wait_for_login(timeout=start_timeout).close()
                if not status_error:
                    # online host cpu
                    if cpu.online(cputune_cpuset):
                        test.fail(
                            "fail to online cpu{}".format(cputune_cpuset))

            # run virsh vcpupin to config vcpu affinity
            if check.startswith("cputune") and (not config_xml):
                result_to_check = virsh.vcpupin(vm_name,
                                                vcpu,
                                                cputune_cpuset,
                                                debug=True)

            if check == "vcpu_placement":
                check_vcpu_placement(test, params)
            elif not status_error:
                check_vcpu_affinity()

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()

        # restore the host cpu environment
        for x in range(hostcpu_num):
            cpu.online(x)
        cmd = "echo '0-{}' > {}".format(hostcpu_num - 1, machine_cpuset_path)
        process.run(cmd, shell=True)
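The test above builds cpuset strings such as "0-{},^{}".format(...). In libvirt's cpuset notation the entries are comma-separated, "a-b" denotes a range and a leading "^" excludes a single CPU; a short expansion sketch to make that concrete:

def expand_cpuset(cpuset):
    """Expand a libvirt cpuset string, e.g. "0-6,^3" -> [0, 1, 2, 4, 5, 6]."""
    included, excluded = set(), set()
    for entry in cpuset.split(","):
        entry = entry.strip()
        if entry.startswith("^"):
            excluded.add(int(entry[1:]))
        elif "-" in entry:
            start, end = entry.split("-")
            included.update(range(int(start), int(end) + 1))
        elif entry:
            included.add(int(entry))
    return sorted(included - excluded)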
Example No. 14
    def __cpu_toggle(core):
        if cpu._get_cpu_status(core):
            cpu.offline(core)
        else:
            cpu.online(core)
Example No. 15
    def __offline_cpus(cores):
        for cpus in range(cores):
            cpu.offline(cpus)
Example No. 16
def run(test, params, env):
    """
    Test vcpu affinity feature as follows:
    positive test:
        1. use vcpu cpuset in xml to define vcpu affinity
        2. use cputune cpuset in xml to define vcpu affinity
        3. use offline-to-online host cpu as cpuset to run virsh vcpupin
        4. set vcpu placement in xml to auto and check xml result
        5. set vcpu cpuset in xml without placement defined and check xml result
    negative test:
        1. use outrange cpuset as vcpu cpuset in xml to define vcpu affinity
        2. use outrange cpuset as cputune cpuset in xml to define vcpu affinity
        3. use invalid cpuset as cputune cpuset in xml to define vcpu affinity
        4. use duplicate vcpu in xml to define vcpu affinity
        5. use offline host cpu as cputune cpuset to run virsh vcpupin
        6. set vcpu affinity for a non-existent vcpu and check xml result
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cpuset_mask = params.get("cpuset_mask", "")
    vcpu = params.get("vcpu", "0")
    maxvcpu = params.get("maxvcpu", "8")
    current_vcpu = params.get("current_vcpu", "3")
    check = params.get("check", "")
    config_xml = params.get("config_xml", "")

    status_error = "yes" == params.get("status_error", "no")
    define_fail = "yes" == params.get("define_fail", "no")
    start_fail = "yes" == params.get("start_fail", "no")
    runtime_fail = "yes" == params.get("runtime_fail", "no")
    vm_down = "yes" == params.get("vm_down", "no")

    vcpu_cpuset = params.get("vcpu_cpuset", "")
    cputune_cpuset = params.get("cputune_cpuset", "")
    vcpu_placement = params.get("vcpu_placement", "static")
    err_msg = params.get("err_msg", "")
    start_timeout = int(params.get("start_timeout", "60"))
    offline_hostcpus = params.get("offline_hostcpus", "")
    machine_cpuset_path = params.get("machine_cpuset_path", "")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)

        vmxml_live = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml_live)

        # if vcpu >= maxvcpu, the cputune should not exist in xml
        if int(vcpu) >= int(maxvcpu):
            try:
                if hasattr(vmxml_live, 'cputune'):
                    test.fail("cputune tag is set when vcpu >= maxvcpu")
                # check the expected vcpu affinity with the one got from running vm
                elif not utils_hotplug.check_affinity(vm, affinity):
                    test.fail("vcpu affinity check fail")
            except xcepts.LibvirtXMLError:
                pass

    try:
        hostcpu_num = int(cpu.total_cpus_count())

        # online all host cpus
        for x in range(hostcpu_num):
            if cpu.online(x):
                test.fail("fail to online cpu{}".format(x))

        # use vcpu cpuset or/and cputune cpuset to define xml
        del vmxml.cputune
        del vmxml.vcpus
        del vmxml.placement
        vmxml.vcpu = int(maxvcpu)
        vmxml.current_vcpu = current_vcpu

        # config vcpu cpuset for cpuset range test
        num = 1 if not status_error else 0
        cpuset_new = "0-{},^{}".format(hostcpu_num-num, cpuset_mask)
        if (config_xml == "vcpu" and check.endswith("range_cpuset")):
            vcpu_cpuset = cpuset_new
        vmxml.cpuset = vcpu_cpuset

        if vcpu_placement:
            vmxml.placement = vcpu_placement

        if config_xml == "cputune":
            cputune = vm_xml.VMCPUTuneXML()
            if check.endswith("range_cpuset"):
                cputune_cpuset = cpuset_new
            if check.endswith("duplicate_vcpu"):
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': "2"}, {'vcpu': vcpu, 'cpuset': "3"}]
            else:
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': cputune_cpuset}]
            vmxml.cputune = cputune

        logging.debug(vmxml)
        if status_error and define_fail:
            result_to_check = virsh.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

        # start the vm
        if status_error and start_fail:
            result_to_check = virsh.start(vm_name, debug=True)

        if (not status_error) or runtime_fail:
            result_to_check = virsh.start(vm_name, debug=True, ignore_status=False)
            vm.wait_for_login(timeout=start_timeout).close()

            # test vcpu cpuset in offline/online  host cpu scenario
            if check.endswith("offline_hostcpu"):
                if vm_down:
                    vm.shutdown()
                for x in offline_hostcpus.split(','):
                    if cpu.offline(x):
                        test.fail("fail to offline cpu{}".format(x))
                    logging.debug("offline host cpu {}".format(x))
                if vm_down:
                    vm.start()
                    vm.wait_for_login(timeout=start_timeout).close()
                if not status_error:
                    # online host cpu
                    if cpu.online(cputune_cpuset):
                        test.fail("fail to online cpu{}".format(cputune_cpuset))

            # run virsh vcpupin to config vcpu affinity
            if check.startswith("cputune") and (not config_xml):
                result_to_check = virsh.vcpupin(vm_name, vcpu, cputune_cpuset, debug=True)

            if check == "vcpu_placement":
                check_vcpu_placement(test, params)
            elif not status_error:
                check_vcpu_affinity()

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()

        # restore the host cpu environment
        for x in range(hostcpu_num):
            cpu.online(x)
        cmd = "echo '0-{}' > {}".format(hostcpu_num-1, machine_cpuset_path)
        process.run(cmd, shell=True)
Example No. 17
def run(test, params, env):
    """
    Test vcpu affinity feature as follows:
    positive test:
        1. use vcpu cpuset in xml to define vcpu affinity
        2. use cputune cpuset in xml to define vcpu affinity
        3. use offline-to-online host cpu as cpuset to run virsh vcpupin
        4. set vcpu placement in xml to auto and check xml result
        5. set vcpu cpuset in xml without placement defined and check xml result
        6. specify vcpu affinity for inactive vcpu
    negative test:
        1. use outrange cpuset as vcpu cpuset in xml to define vcpu affinity
        2. use outrange cpuset as cputune cpuset in xml to define vcpu affinity
        3. use invalid cpuset as cputune cpuset in xml to define vcpu affinity
        4. use duplicate vcpu in xml to define vcpu affinity
        5. use offline host cpu as cputune cpuset to run virsh vcpupin
        6. set vcpu affinity for a non-existent vcpu and check xml result
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cpuset_mask = params.get("cpuset_mask", "")
    vcpu = params.get("vcpu", "0")
    setvcpus_option = params.get("setvcpus_option", "")
    setvcpus_count = params.get("setvcpus_count", "0")
    vcpupin_option = params.get("vcpupin_option", "")
    maxvcpu = params.get("maxvcpu", "8")
    current_vcpu = params.get("current_vcpu", "3")
    check = params.get("check", "")
    config_xml = params.get("config_xml", "")

    status_error = "yes" == params.get("status_error", "no")
    define_fail = "yes" == params.get("define_fail", "no")
    start_fail = "yes" == params.get("start_fail", "no")
    runtime_fail = "yes" == params.get("runtime_fail", "no")
    hotplug_vcpu = "yes" == params.get("hotplug_vcpu", "no")

    vcpu_cpuset = params.get("vcpu_cpuset", "")
    cputune_cpuset = params.get("cputune_cpuset", "")
    vcpu_placement = params.get("vcpu_placement", "static")
    err_msg = params.get("err_msg", "")
    start_timeout = int(params.get("start_timeout", "180"))
    offline_hostcpus = params.get("offline_hostcpus", "")
    machine_cpuset_path = params.get("machine_cpuset_path", "")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)
        host_cpu_count = cpuutil.total_cpus_count()

        vmxml_live = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml_live)

        # if vcpu >= maxvcpu, the cputune should not exist in xml
        if int(vcpu) >= int(maxvcpu):
            try:
                if hasattr(vmxml_live, 'cputune'):
                    test.fail("cputune tag is set when vcpu >= maxvcpu")
            except xcepts.LibvirtXMLError:
                pass
        elif "config" in vcpupin_option:
            vcpu_affinity = cpu.affinity_from_vcpupin(vm, vcpu, vcpupin_option)
            affinity = cpu.cpus_string_to_affinity_list(
                str(affinity[vcpu]), host_cpu_count)
            logging.debug("vcpu_affinity {}".format(vcpu_affinity))
            logging.debug("affinity {}".format(affinity))
            if vcpu_affinity[int(vcpu)] != affinity:
                test.fail("vcpu affinity check fail")
        # check the expected vcpu affinity with the one got from running vm
        elif not cpu.check_affinity(vm, affinity):
            test.fail("vcpu affinity check fail")

    try:
        hostcpu_num = int(cpuutil.total_cpus_count())
        if hostcpu_num < 8:
            test.cancel("The host should have at least 8 CPUs for this test.")

        # online all host cpus
        for x in range(1, hostcpu_num):
            if cpuutil.online(x):
                test.fail("fail to online cpu{}".format(x))

        # use vcpu cpuset or/and cputune cpuset to define xml
        del vmxml.cputune
        del vmxml.vcpus
        del vmxml.placement
        vmxml.vcpu = int(maxvcpu)
        vmxml.current_vcpu = current_vcpu

        # Remove cpu topology to avoid a mismatch with the vcpu count
        if vmxml.get_cpu_topology():
            new_cpu = vmxml.cpu
            del new_cpu.topology
            vmxml.cpu = new_cpu

        # config vcpu cpuset for cpuset range test
        num = 1 if not status_error else 0
        cpuset_new = "0-{},^{}".format(hostcpu_num-num, cpuset_mask)
        if (config_xml == "vcpu" and check.endswith("range_cpuset")):
            vcpu_cpuset = cpuset_new
        vmxml.cpuset = vcpu_cpuset

        if vcpu_placement:
            vmxml.placement = vcpu_placement

            # Remove numatune node since it will be automatically set
            # under 'auto' state
            if vcpu_placement == 'auto':
                vmxml.xmltreefile.remove_by_xpath('/numatune', remove_all=True)
                vmxml.xmltreefile.write()

        if config_xml == "cputune":
            cputune = vm_xml.VMCPUTuneXML()
            if check.endswith("range_cpuset"):
                cputune_cpuset = cpuset_new
            if check.endswith("duplicate_vcpu"):
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': "2"}, {'vcpu': vcpu, 'cpuset': "3"}]
            else:
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': cputune_cpuset}]
            vmxml.cputune = cputune

        logging.debug(vmxml)
        if status_error and define_fail:
            result_to_check = virsh.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

        # test vcpu cpuset in offline/online host cpu scenario
        if check.endswith("offline_hostcpu"):
            for x in offline_hostcpus.split(','):
                if cpuutil.offline(x):
                    test.fail("fail to offline cpu{}".format(x))
                logging.debug("offline host cpu {}".format(x))

        # start the vm
        if status_error and start_fail:
            result_to_check = virsh.start(vm_name, debug=True)

        if (not status_error) or runtime_fail:
            vm.start()
            vm.wait_for_login(timeout=start_timeout).close()

            # test vcpu cpuset in offline/online host cpu scenario
            if check.endswith("offline_hostcpu") and not status_error:
                # online host cpu
                if cpuutil.online(cputune_cpuset):
                    test.fail("fail to online cpu{}".format(cputune_cpuset))

            # run virsh vcpupin to config vcpu affinity
            if check.startswith("cputune") and (not config_xml):
                result_to_check = virsh.vcpupin(vm_name, vcpu, cputune_cpuset, vcpupin_option, debug=True)

            # hotplug vcpu test scenario
            if hotplug_vcpu:
                virsh.setvcpus(vm_name, setvcpus_count, setvcpus_option, debug=True, ignore_status=False)

            libvirtd_restart = False
            while True:
                if check == "vcpu_placement":
                    check_vcpu_placement(test, params)
                elif not status_error:
                    check_vcpu_affinity()
                if libvirtd_restart:
                    break
                # restart libvirtd and check vcpu affinity again
                utils_libvirtd.Libvirtd().restart()
                libvirtd_restart = True

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()

        # restore the host cpu environment
        for x in range(1, hostcpu_num):
            cpuutil.online(x)
        cmd = "echo '0-{}' > {}".format(hostcpu_num-1, machine_cpuset_path)
        process.run(cmd, shell=True)