Example #1
 def __init__(self, params, env):
     """
     :param params: Dict of the test parameters
     :param env: Test environment object
     self.host_cpu_list = cpu.cpu_online_list()
     self.iterations = int(params.get("stress_itrs", 1))
     self.event_sleep_time = int(params.get("event_sleep_time", 10))
     self.current_vcpu = params.get("smp", 32)
     self.max_vcpu = params.get("virsh_maxcpus", 32)
     self.ignore_status = params.get("ignore_status", "no") == "yes"
     self.vms = env.get_all_vms()
     self.events = params.get("stress_events", "reboot").split(',')
     self.threads = []
     self.iface_num = params.get("iface_num", '1')
     self.iface_type = params.get("iface_type", "network")
     self.iface_model = params.get("iface_model", "virtio")
     self.iface_source = eval(params.get("iface_source",
                                         "{'network':'default'}"))
     self.attach_option = params.get("attach_option", "")
     self.detach_option = params.get("detach_option", "")
     self.disk_size = params.get("virt_disk_device_size", "1")
     self.disk_type = params.get("disk_type", "file")
     self.disk_device = params.get("disk_device", "disk")
     self.disk_format = params.get("disk_format", "qcow2")
     self.device_target = params.get(
         "virt_disk_device_target", "vda").split()
     self.path = params.get("path", "")
     self.device_source_names = params.get(
         "virt_disk_device_source", "").split()
     self.disk_driver = params.get("driver_name", "qemu")
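
Note: eval() on a configuration string will execute arbitrary code. A safer
parse of the same value, using only the standard library (a sketch, not part
of the original test), would be:

import ast

iface_source = ast.literal_eval(
    params.get("iface_source", "{'network':'default'}"))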
Example #2
def run(test, params, env):
    """
    Test the command virsh nodecpumap

    (1) Call virsh nodecpumap
    (2) Call virsh nodecpumap with pretty option
    (3) Call virsh nodecpumap with an unexpected option
    """

    option = params.get("virsh_node_options")
    status_error = params.get("status_error")
    cpu_off_on_test = params.get("cpu_off_on", "no") == "yes"
    online_cpus = cpu.cpu_online_list()
    test_cpu = random.choice(online_cpus)

    if cpu_off_on_test:
        # Turn off CPU
        cpu.offline(test_cpu)

    result = virsh.nodecpumap(option, ignore_status=True, debug=True)
    check_result(result, option, status_error, test)

    if cpu_off_on_test:
        # Turn on CPU and check again
        cpu.online(test_cpu)
        result = virsh.nodecpumap(option, ignore_status=True, debug=True)
        check_result(result, option, status_error, test)
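
The helper check_result() is defined elsewhere in the test module. A minimal
sketch of the kind of check it performs, assuming it only validates the exit
status against the expected status_error flag (hypothetical, not the original
helper):

def check_result(result, option, status_error, test):
    # Negative case: the command was expected to fail
    if status_error == "yes":
        if result.exit_status == 0:
            test.fail("'virsh nodecpumap %s' unexpectedly succeeded" % option)
    # Positive case: the command must succeed
    elif result.exit_status != 0:
        test.fail("'virsh nodecpumap %s' failed: %s"
                  % (option, result.stderr))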
Example #3
 def test(self):
     """
      Validate the cpu idle states reported by cpupower against the device tree
     """
     for _ in range(1, 10):
         cpu_num = random.choice(cpu.cpu_online_list())
         self.log.info("--------CPU: %s--------", cpu_num)
         states = process.system_output("cpupower -c %s idle-info --silent"
                                        " | grep 'Number of idle states:' |"
                                        "awk '{print $5}'"
                                        % cpu_num, shell=True)
         cpu_idle_states = []
         for i in range(1, int(states)):
             val = process.system_output("cat /sys/devices/system/cpu/"
                                         "cpu%s/cpuidle/state%s/"
                                         "name" % (cpu_num, i))
             if 'power8' in cpu.get_cpu_arch():
                 val = self.set_idle_states(val)
             cpu_idle_states.append(val)
         devicetree_list = self.read_from_device_tree()
         if cpu_idle_states == devicetree_list:
             self.log.info("PASS: Validated the idle states")
         else:
             self.log.info("cpupower tool: %s and device tree: %s",
                           cpu_idle_states, devicetree_list)
             self.fail("FAIL: Please check the idle states")
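
Side note: the per-state name can be read from sysfs directly instead of
shelling out to cat for every state (an equivalent sketch of the inner loop
body above):

state_path = ("/sys/devices/system/cpu/cpu%s/cpuidle/state%s/name"
              % (cpu_num, i))
with open(state_path) as state_file:
    val = state_file.read().strip()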
Example #4
    def test(self):

        mgen_flag = False
        mgen = os.path.join(self.sourcedir, 'test/mgen/mgen')
        self.numa_pid = process.SubProcess(
            'numatop -d result_file', shell=True)
        self.numa_pid.start()

        # Run mgen for 5 seconds to generate a single snapshot of numatop
        process.run('%s -a 0 -c %s -t 5' %
                    (mgen, cpu.cpu_online_list()[0]), shell=True, sudo=True)

        # Kill numatop recording after running mgen
        self.numa_pid.terminate()

        # Analyse record file for mgen record
        with open('%s/result_file' % self.sourcedir, 'r') as f_read:
            lines = f_read.readlines()
            for line in lines:
                if 'mgen' in line:
                    mgen_flag = True
                    break
        if not mgen_flag:
            self.fail('Numatop failed to record mgen latency. Please check '
                      'the record file: %s/result_file' % self.sourcedir)
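
The record-file scan above can be written more compactly with any() (an
equivalent sketch):

with open('%s/result_file' % self.sourcedir, 'r') as f_read:
    mgen_flag = any('mgen' in line for line in f_read)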
Example #5
def get_cpustats(vm, cpu=None):
    """
    Get the cpustats output of a given domain
    :param vm: VM domain
    :param cpu: Host cpu index, default all cpus
    :return: dict of cpu stats values, or None if the command fails
    result format:
    {0: [vcputime, emulatortime, cputime],
     ...
     'total': [cputime]}
    """
    host_cpu_online = utils.cpu_online_list()
    cpustats = {}
    if cpu is not None:  # a cpu index of 0 is valid, so compare against None
        cpustats[cpu] = []
        option = "--start %s --count 1" % cpu
        result = virsh.cpu_stats(vm.name, option)
        if result.exit_status != 0:
            logging.error("cpu stats command failed: %s",
                          results_stderr_52lts(result))
            return None
        output = results_stdout_52lts(result).strip().split()
        if re.match("CPU%s" % cpu, output[0]):
            cpustats[cpu] = [float(output[5]),  # vcputime
                             float(output[2]) - float(output[5]),  # emulator
                             float(output[2])]  # cputime

    else:
        for online_cpu in host_cpu_online:
            cpustats[online_cpu] = []
            option = "--start %s --count 1" % online_cpu
            result = virsh.cpu_stats(vm.name, option)
            if result.exit_status != 0:
                logging.error("cpu stats command failed: %s",
                              results_stderr_52lts(result))
                return None
            output = results_stdout_52lts(result).strip().split()
            if re.match("CPU%s" % online_cpu, output[0]):
                cpustats[online_cpu] = [float(output[5]),  # vcputime
                                        float(output[2]) - float(output[5]),  # emulator
                                        float(output[2])]  # cputime
    result = virsh.cpu_stats(vm.name, "--total")
    cpustats["total"] = []
    if result.exit_status != 0:
        logging.error("cpu stats command failed: %s",
                      results_stderr_52lts(result))
        return None
    output = results_stdout_52lts(result).strip().split()
    cpustats["total"] = [float(output[2])]  # cputime
    return cpustats
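
Hypothetical usage, assuming vm is a running VM object:

stats = get_cpustats(vm)      # stats for every online host cpu, plus 'total'
stats0 = get_cpustats(vm, 0)  # stats for host cpu 0 only, plus 'total'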
Example #6
def run(test, params, env):
    """
    Test the command virsh nodecpustats

    (1) Call the virsh nodecpustats command for each host cpu
        separately
    (2) Get the output
    (3) Check against the /proc/stat output (o) for the respective cpu
        user: o[0] + o[1]
        system: o[2] + o[5] + o[6]
        idle: o[3]
        iowait: o[4]
    (4) Call the virsh nodecpustats command with an unexpected option
    (5) Call the virsh nodecpustats command with the libvirtd service stopped
    """
    def get_expected_stat(cpu=None):
        """
        Parse cpu stats from /proc/stat

        :param cpu: cpu index, None for total cpu stat
        :return: dict of cpu stats
        """
        stats = {}
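        # Each cpu line in /proc/stat holds (user nice system idle iowait
        # irq softirq steal guest guest_nice) in jiffies, e.g.:
        #   cpu  6163 12 2506 433141 658 0 12 0 0 0
        #   cpu0 1500 3 620 108285 160 0 3 0 0 0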
        cpu_stat = []
        with open("/proc/stat", "r") as fl:
            for line in fl.readlines():
                if line.startswith("cpu"):
                    cpu_stat.append(line.strip().split(" ")[1:])
        # The aggregate "cpu" line has two spaces after the label, so the
        # split above leaves a leading empty field; drop it
        del cpu_stat[0][0]
        if cpu is None:
            idx = 0
        else:
            idx = int(cpu) + 1
        stats['user'] = int(cpu_stat[idx][0]) + int(cpu_stat[idx][1])
        stats['system'] = int(cpu_stat[idx][2]) + int(cpu_stat[idx][5]) + int(
            cpu_stat[idx][6])
        stats['idle'] = int(cpu_stat[idx][3])
        stats['iowait'] = int(cpu_stat[idx][4])
        stats['total'] = stats['user'] + stats['system'] + stats[
            'idle'] + stats['iowait']
        return stats

    def virsh_check_nodecpustats_percpu(actual_stats, cpu):
        """
        Check the actual nodecpustats output value
        total time <= total stat from proc

        :param actual_stats: Actual cpu stats
        :param cpu: cpu index

        :return: True if within the limit, fails the test otherwise
        """

        # Normalise from nanoseconds to seconds
        total = (actual_stats['system'] + actual_stats['user'] +
                 actual_stats['idle'] + actual_stats['iowait']) / float(10**9)

        expected = get_expected_stat(cpu)
        if total > expected['total']:
            test.fail("Command 'virsh nodecpustats' failed: total time"
                      " %f is more than the /proc/stat total %f"
                      % (total, expected['total']))
        return True

    def virsh_check_nodecpustats(actual_stats):
        """
        Check the actual nodecpustats output value
        total time <= total stat from proc

        :param actual_stats: Actual cpu stats
        :return: True if within the limit, fails the test otherwise
        """

        # Normalise from nanoseconds to seconds
        total = (actual_stats['system'] + actual_stats['user'] +
                 actual_stats['idle'] + actual_stats['iowait']) / float(10**9)
        expected = get_expected_stat()
        if total > expected['total']:
            test.fail("Command 'virsh nodecpustats' failed: total time"
                      " %f is more than the /proc/stat total %f"
                      % (total, expected['total']))
        return True

    def virsh_check_nodecpustats_percentage(actual_per):
        """
        Check the actual nodecpustats percentage adds up to 100%

        :param actual_per: Actual cpu stats percentage
        :return: True if the percentages add up to 100, fails the test otherwise
        """

        total = int(
            round(actual_per['user'] + actual_per['system'] +
                  actual_per['idle'] + actual_per['iowait']))

        if total != 100:
            test.fail("Command 'virsh nodecpustats' failed: the total"
                      " percentage value %d is not equal to 100" % total)

    def parse_output(output):
        """
        To get the output parsed into a dictionary

        :param output: virsh command output
        :return: dict of user,system,idle,iowait times
        """

        # From the beginning of a line, group 1 is one or more word-characters,
        # followed by zero or more whitespace characters and a ':',
        # then one or more whitespace characters,
        # followed by group 2, which is one or more digit characters,
        # e.g. as below
        # user:                  6163690000000
        #
        regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+)")
        actual = {}

        for line in output.stdout.split('\n'):
            match_obj = regex_obj.search(line)
            # Skip lines that do not match the pattern
            if match_obj is not None:
                name = match_obj.group(1)
                value = match_obj.group(2)
                actual[name] = int(value)
        return actual

    def parse_percentage_output(output):
        """
        To get the output parsed into a dictionary

        :param output: virsh command output
        :return: dict of user,system,idle,iowait times
        """

        # From the beginning of a line, group 1 is one or more word-characters,
        # followed by zero or more whitespace characters and a ':',
        # then one or more whitespace characters,
        # followed by group 2, which is digits with a decimal point,
        # e.g. as below
        # user:             1.5%
        #
        regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+\.\d+)")
        actual_percentage = {}

        for line in output.stdout.split('\n'):
            match_obj = regex_obj.search(line)
            # Skip lines that do not match the pattern
            if match_obj is not None:
                name = match_obj.group(1)
                value = match_obj.group(2)
                actual_percentage[name] = float(value)
        return actual_percentage

    # Initialize the variables
    itr = int(params.get("inner_test_iterations"))
    option = params.get("virsh_cpunodestats_options")
    invalid_cpunum = params.get("invalid_cpunum")
    status_error = params.get("status_error")
    libvirtd = params.get("libvirtd", "on")

    # Prepare libvirtd service
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Get the host cpu list
    host_cpus_list = cpuutil.cpu_online_list()

    # Run the test case for the configured number of iterations (default 5;
    # this can be changed in the subtests.cfg file)
    for i in range(itr):

        if status_error == "yes":
            if invalid_cpunum == "yes":
                option = "--cpu %s" % (len(host_cpus_list) + 1)
            output = virsh.nodecpustats(ignore_status=True, option=option)
            status = output.exit_status

            if status == 0:
                if libvirtd == "off":
                    utils_libvirtd.libvirtd_start()
                    test.fail("Command 'virsh nodecpustats' "
                              "succeeded with the libvirtd service "
                              "stopped, which is incorrect")
                else:
                    test.fail("Command 'virsh nodecpustats %s' "
                              "succeeded (incorrect command)" % option)

        elif status_error == "no":
            # Run the testcase for each cpu to get the cpu stats
            for idx, cpu in enumerate(host_cpus_list):
                option = "--cpu %s" % cpu
                output = virsh.nodecpustats(ignore_status=True, option=option)
                status = output.exit_status

                if status == 0:
                    actual_value = parse_output(output)
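                    # /proc/stat lists only online cpus, in order, so the
                    # expected stats are looked up by position (idx) rather
                    # than by cpu id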
                    virsh_check_nodecpustats_percpu(actual_value, idx)
                else:
                    test.fail("Command 'virsh nodecpustats %s'"
                              " did not succeed" % option)

            # Run the test case for each cpu to get the cpu stats in percentage
            for cpu in host_cpus_list:
                option = "--cpu %s --percent" % cpu
                output = virsh.nodecpustats(ignore_status=True, option=option)
                status = output.exit_status

                if status == 0:
                    actual_value = parse_percentage_output(output)
                    virsh_check_nodecpustats_percentage(actual_value)
                else:
                    test.fail("Command 'virsh nodecpustats %s'"
                              " did not succeed" % option)

            option = ''
            # Run the test case for total cpus to get the cpus stats
            output = virsh.nodecpustats(ignore_status=True, option=option)
            status = output.exit_status

            if status == 0:
                actual_value = parse_output(output)
                virsh_check_nodecpustats(actual_value)
            else:
                test.fail("Command 'virsh nodecpustats %s'"
                          " did not succeed" % option)

            # Run the test case for the total cpus to get the stats in
            # percentage
            option = "--percent"
            output = virsh.nodecpustats(ignore_status=True, option=option)
            status = output.exit_status

            if status == 0:
                actual_value = parse_percentage_output(output)
                virsh_check_nodecpustats_percentage(actual_value)
            else:
                test.fail("Command 'virsh nodecpustats %s'"
                          " did not succeed" % option)

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()
Example #7
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-ga if needed.
    2. Checking for vcpu numbers in vcpucount, vcpuinfo, domain xml,
       vcpupin and inside domain.
    3. Plug vcpu for the domain.
    4. Repeat step 2 to check again.
    5. Control domain(save, managedsave, s3, s4, etc.).
    6. Repeat step 2 to check again.
    7. Recover domain(restore, wakeup, etc.).
    8. Repeat step 2 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 2 to check again.
    11. Repeat step 5 to control domain (as BZ#1088216 is not fixed, skip
        save/managedsave related actions).
    12. Repeat step 2 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 2 to check again.
    15. Recover test environment.
    """
    def manipulate_domain(vm_name, vm_operation, recover=False):
        """
        Operate domain to given state or recover it.

        :param vm_name: Name of the VM domain
        :param vm_operation: Operation to be performed on the VM domain,
                             like save, managedsave, suspend
        :param recover: flag to inform whether to set or reset
                        vm_operation
        """
        global vm_uptime_init
        save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
        if not recover:
            if vm_operation == "save":
                save_option = ""
                result = virsh.save(vm_name,
                                    save_file,
                                    save_option,
                                    ignore_status=True,
                                    debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "managedsave":
                managedsave_option = ""
                result = virsh.managedsave(vm_name,
                                           managedsave_option,
                                           ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmsuspend(vm_name,
                                            suspend_target,
                                            ignore_status=True,
                                            debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s4":
                suspend_target = "disk"
                result = virsh.dompmsuspend(vm_name,
                                            suspend_target,
                                            ignore_status=True,
                                            debug=True)
                libvirt.check_exit_status(result)
                # Wait domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
            elif vm_operation == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                vm.reboot()
                vm_uptime_init = vm.uptime()
            else:
                logging.debug("No operation for the domain")

        else:
            if vm_operation == "save":
                if os.path.exists(save_file):
                    result = virsh.restore(save_file,
                                           ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif vm_operation in ["managedsave", "s4"]:
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmwakeup(vm_name,
                                           ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                pass
            else:
                logging.debug("No need recover the domain")

    def online_new_vcpu(vm, vcpu_plug_num):
        """
        For Fedora/RHEL7 guests, udev cannot online hot-added CPUs
        automatically (refer to BZ#968811 for details), so enable them manually.

        :param vm: VM object
        :param vcpu_plug_num: Hotplugged vcpu count
        """
        cpu_is_online = []
        session = vm.wait_for_login()
        for i in range(1, int(vcpu_plug_num)):
            cpu_is_online.append(False)
            cpu = "/sys/devices/system/cpu/cpu%s/online" % i
            cmd_s, cmd_o = session.cmd_status_output("cat %s" % cpu)
            logging.debug("cmd exit status: %s, cmd output: %s", cmd_s, cmd_o)
            if cmd_s != 0:
                logging.error("Cannot find cpu %s in domain", i)
            else:
                if cmd_o.strip() == "0":
                    if session.cmd_status("echo 1 > %s" % cpu) == 0:
                        cpu_is_online[i - 1] = True
                    else:
                        logging.error("Failed to bring cpu %s online", i)
                else:
                    cpu_is_online[i - 1] = True
        session.close()
        return False not in cpu_is_online

    def check_setvcpus_result(cmd_result, expect_error):
        """
        Check command result.

        For setvcpus, pass unsupported commands (plug or unplug vcpus) by
        checking the command stderr.

        :param cmd_result: Command result
        :param expect_error: Whether an error is expected, True or False
        """
        if cmd_result.exit_status != 0:
            if expect_error:
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpus may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpus may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         cmd_result.stderr):
                test.cancel("guest <os> machine property may be too"
                            " old to allow hotplug")

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure.  In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         cmd_result.stderr):
                test.cancel("Unsupported virsh setvcpu hotplug")

            # Maybe QEMU doesn't support vcpu unplug
            if re.search(
                    "Operation not supported: qemu didn't unplug the vCPUs",
                    cmd_result.stderr):
                test.cancel("This qemu does not support vcpu unplug")

            # Qemu guest agent version could be too low
            if re.search("The command guest-get-vcpus has not been found",
                         cmd_result.stderr):
                err_msg = "Your agent version is too low: %s" % cmd_result.stderr
                logging.warning(err_msg)
                test.cancel(err_msg)

            # Attempting to enable more vCPUs in the guest than is currently
            # enabled in the guest but less than the maximum count for the VM
            if re.search(
                    "requested vcpu count is greater than the count of "
                    "enabled vcpus in the domain", cmd_result.stderr):
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return

            # Otherwise, it seems we have a real error
            test.fail("Run failed with the right command: %s" % cmd_result.stderr)
        else:
            if expect_error:
                test.fail("Expected failure, but the command ran successfully")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    global vm_uptime_init
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = int(params.get("vcpu_max_num"))
    vcpu_current_num = int(params.get("vcpu_current_num"))
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = int(params.get("vcpu_plug_num"))
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = int(params.get("vcpu_unplug_num"))
    vcpu_max_timeout = int(params.get("vcpu_max_timeout", "480"))
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")
    with_stress = "yes" == params.get("run_stress", "no")
    iterations = int(params.get("test_itr", 1))
    topology_correction = "yes" == params.get("topology_correction", "no")
    # Init expect vcpu count values
    expect_vcpu_num = {
        'max_config': vcpu_max_num,
        'max_live': vcpu_max_num,
        'cur_config': vcpu_current_num,
        'cur_live': vcpu_current_num,
        'guest_live': vcpu_current_num
    }
    if check_after_plug_fail:
        expect_vcpu_num_bk = expect_vcpu_num.copy()
    # Init expect vcpu pin values
    expect_vcpupin = {}
    result_failed = 0

    # Init cpu-list for vcpupin
    host_cpu_count = os.sysconf('SC_NPROCESSORS_CONF')
    if int(host_cpu_count) < 2 and pin_cpu_list != "x":
        test.cancel("This case needs more host cpus for cpu-list=%s, but the"
                    " current number of host cpus is %s." %
                    (pin_cpu_list, host_cpu_count))

    cpus_list = cpu_util.cpu_online_list()
    logging.debug("Active cpus in host are %s", cpus_list)

    cpu_seq_str = ""
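    # Find the first pair of consecutively numbered online cpus to build an
    # "x-y" style range string for vcpupin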
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    elif pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value from the cfg
        pass

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name,
                           vcpu_max_num,
                           vcpu_current_num,
                           topology_correction=topology_correction)
        # Do not apply S3/S4 on power
        cpu_arch = platform.machine()
        if cpu_arch in ('x86_64', 'i386', 'i686'):
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()
        vm_uptime_init = vm.uptime()
        if with_stress:
            testlist = utils_test.get_avocadotestlist(params)
            bt = utils_test.run_avocado_bg(vm, params, test, testlist)
            if not bt:
                test.cancel("guest stress failed to start")
        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        for _ in range(iterations):
            if not cpu.check_vcpu_value(vm, expect_vcpu_num):
                logging.error("Expected vcpu check failed")
                result_failed += 1
            # plug vcpu
            if vcpu_plug:
                # Pin vcpu
                if pin_before_plug:
                    result = virsh.vcpupin(vm_name,
                                           pin_vcpu,
                                           pin_cpu_list,
                                           ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                result = virsh.setvcpus(vm_name,
                                        vcpu_plug_num,
                                        setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True,
                                        debug=True)
                check_setvcpus_result(result, status_error)

                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_plug_num
                elif setvcpu_option == "--guest":
                    # vcpuset '--guest' only affect vcpu number in guest
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_plug_num
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                    if not status_error:
                        if not utils_misc.wait_for(
                                lambda: cpu.check_if_vm_vcpu_match(
                                    vcpu_plug_num, vm),
                                vcpu_max_timeout,
                                text="wait for vcpu online"
                        ) or not online_new_vcpu(vm, vcpu_plug_num):
                            test.fail("Fail to enable new added cpu")

                # Pin vcpu
                if pin_after_plug:
                    result = virsh.vcpupin(vm_name,
                                           pin_vcpu,
                                           pin_cpu_list,
                                           ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if status_error and check_after_plug_fail:
                    if not cpu.check_vcpu_value(vm, expect_vcpu_num_bk, {},
                                                setvcpu_option):
                        logging.error("Expected vcpu check failed")
                        result_failed += 1

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    if not cpu.check_vcpu_value(vm, expect_vcpu_num,
                                                expect_vcpupin,
                                                setvcpu_option):
                        logging.error("Expected vcpu check failed")
                        result_failed += 1

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not cpu.check_vcpu_value(vm, expect_vcpu_num,
                                                    expect_vcpupin,
                                                    setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming the domain from S4 may take a long time (QEMU
                    # bug), so wait for 10 mins and skip the remaining test
                    # steps if the domain does not resume successfully
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.cancel("Skip remaining test steps as domain"
                                    " did not resume in 10 mins: %s" % e)
                    # For hotplug/unplug vcpu without the '--config' flag,
                    # after suspending the domain to disk (shut off) and
                    # re-starting it, the current live vcpu number will
                    # recover to the original value
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_plug_num
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not cpu.check_vcpu_value(vm, expect_vcpu_num,
                                                    expect_vcpupin,
                                                    setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1

            # Unplug vcpu
            # Since QEMU 2.2.0, by default all current vcpus are
            # non-hotpluggable when the VM starts, and vcpu 0 (id=1) must
            # always be present and non-hotpluggable, which means we can't
            # hotunplug these vcpus directly. So we can either hotplug more
            # vcpus before we do hotunplug, or modify the 'hotpluggable'
            # attribute to 'yes' for all vcpus except vcpu 0, to make sure
            # libvirt can find appropriate hotpluggable vcpus to reach the
            # desired target vcpu count. As the simpler preparation step,
            # here we choose to hotplug more vcpus.
            if vcpu_unplug:
                if setvcpu_option == "--live":
                    logging.info("Hotplug vcpu to the maximum count to make"
                                 " sure all these newly plugged vcpus are "
                                 "hotunpluggable")
                    result = virsh.setvcpus(vm_name,
                                            vcpu_max_num,
                                            '--live',
                                            debug=True)
                    libvirt.check_exit_status(result)
                # Pin vcpu
                if pin_before_unplug:
                    result = virsh.vcpupin(vm_name,
                                           pin_vcpu,
                                           pin_cpu_list,
                                           ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    # As the vcpu will unplug later, so set
                    # expect_vcpupin to empty
                    expect_vcpupin = {}

                # setvcpus is asynchronous: even after it returns, the
                # operation may not be complete, so the guest vcpu count
                # must be polled. For the case of unplugging vcpus from the
                # max vcpu number down to 1, keep observing after setvcpus
                # returns until the vcpu count stops changing.
                result = virsh.setvcpus(vm_name,
                                        vcpu_unplug_num,
                                        setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True,
                                        debug=True)
                unsupport_str = cpu.vcpuhotunplug_unsupport_str()
                if unsupport_str and (unsupport_str in result.stderr):
                    test.cancel("Vcpu hotunplug is not supported in this host:"
                                "\n%s" % result.stderr)
                session = None
                try:
                    session = vm.wait_for_login()
                    cmd = "lscpu | grep \"^CPU(s):\""
                    operation = "setvcpus"
                    prev_output = -1
                    while True:
                        ret, output = session.cmd_status_output(cmd)
                        if ret:
                            test.error("Run lscpu failed, output: %s" % output)
                        output = output.split(":")[-1].strip()

                        if int(prev_output) == int(output):
                            break
                        prev_output = output
                        time.sleep(5)
                    logging.debug(
                        "CPUs available from inside guest after %s - %s",
                        operation, output)
                    if int(output) != vcpu_unplug_num:
                        test.fail("CPU %s failed as the new cpu count is not"
                                  " reflected inside the guest" % operation)
                finally:
                    if session:
                        session.close()

                check_setvcpus_result(result, status_error)
                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_unplug_num
                elif setvcpu_option == "--guest":
                    # vcpuset '--guest' only affect vcpu number in guest
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_unplug_num
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num

                # Pin vcpu
                if pin_after_unplug:
                    result = virsh.vcpupin(vm_name,
                                           pin_vcpu,
                                           pin_cpu_list,
                                           ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    if not cpu.check_vcpu_value(vm, expect_vcpu_num,
                                                expect_vcpupin,
                                                setvcpu_option):
                        logging.error("Expected vcpu check failed")
                        result_failed += 1

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not cpu.check_vcpu_value(vm, expect_vcpu_num,
                                                    expect_vcpupin,
                                                    setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming the domain from S4 may take a long time
                    # (QEMU bug), so wait for 10 mins and skip the
                    # remaining test steps if the domain does not resume
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.cancel("Skip remaining test steps as domain"
                                    " did not resume in 10 mins: %s" % e)
                    # For hotplug/unplug vcpu without the '--config' flag,
                    # after suspending the domain to disk (shut off) and
                    # re-starting it, the current live vcpu number will
                    # recover to the original value
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_unplug_num
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not cpu.check_vcpu_value(vm, expect_vcpu_num,
                                                    expect_vcpupin,
                                                    setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1
        if vm.uptime() < vm_uptime_init:
            test.fail("Unexpected VM reboot detected during the test")
    # Recover env
    finally:
        if need_mkswap:
            vm.cleanup_swap()
        if with_stress:
            if "bt" in locals() and bt:
                bt.join()
        vm.destroy()
        backup_xml.sync()

    if not status_error:
        if result_failed > 0:
            test.fail("Test Failed")
Example #8
def run(test, params, env):
    """
    Test vcpu affinity feature as follows:
    positive test:
        1. use vcpu cpuset in xml to define vcpu affinity
        2. use cputune cpuset in xml to define vcpu affinity
        3. use offline-to-online host cpu as cpuset to run virsh vcpupin
        4. set vcpu placement in xml to auto and check xml result
        5. set vcpu cpuset in xml without placement defined and check xml result
        6. specify vcpu affinity for inactive vcpu
    negative test:
        1. use an out-of-range cpuset as vcpu cpuset in xml to define vcpu affinity
        2. use an out-of-range cpuset as cputune cpuset in xml to define vcpu affinity
        3. use an invalid cpuset as cputune cpuset in xml to define vcpu affinity
        4. use a duplicate vcpu in xml to define vcpu affinity
        5. use an offline host cpu as cputune cpuset to run virsh vcpupin
        6. set vcpu affinity for a non-existent vcpu and check xml result
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cpuset_mask = params.get("cpuset_mask", "")
    vcpu = params.get("vcpu", "0")
    setvcpus_option = params.get("setvcpus_option", "")
    setvcpus_count = params.get("setvcpus_count", "0")
    vcpupin_option = params.get("vcpupin_option", "")
    maxvcpu = params.get("maxvcpu", "8")
    current_vcpu = params.get("current_vcpu", "3")
    check = params.get("check", "")
    config_xml = params.get("config_xml", "")

    status_error = "yes" == params.get("status_error", "no")
    define_fail = "yes" == params.get("define_fail", "no")
    start_fail = "yes" == params.get("start_fail", "no")
    runtime_fail = "yes" == params.get("runtime_fail", "no")
    hotplug_vcpu = "yes" == params.get("hotplug_vcpu", "no")

    vcpu_cpuset = params.get("vcpu_cpuset", "")
    cputune_cpuset = params.get("cputune_cpuset", "")
    vcpu_placement = params.get("vcpu_placement", "static")
    err_msg = params.get("err_msg", "")
    start_timeout = int(params.get("start_timeout", "180"))
    offline_hostcpus = params.get("offline_hostcpus", "")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)
        host_cpu_count = cpuutil.total_cpus_count()

        vmxml_live = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml_live)

        # if vcpu >= maxvcpu, the cputune should not exist in xml
        if int(vcpu) >= int(maxvcpu):
            try:
                if hasattr(vmxml_live, 'cputune'):
                    test.fail("cputune tag is set when vcpu >= maxvcpu")
            except xcepts.LibvirtXMLError:
                pass
        elif "config" in vcpupin_option:
            vcpu_affinity = cpu.affinity_from_vcpupin(vm, vcpu, vcpupin_option)
            affinity = cpu.cpus_string_to_affinity_list(
                str(affinity[vcpu]), host_cpu_count)
            logging.debug("vcpu_affinity {}".format(vcpu_affinity))
            logging.debug("affinity {}".format(affinity))
            if vcpu_affinity[int(vcpu)] != affinity:
                test.fail("vcpu affinity check fail")
        # check the expected vcpu affinity with the one got from running vm
        elif not cpu.check_affinity(vm, affinity):
            test.fail("vcpu affinity check fail")

    try:
        hostcpu_num = int(cpuutil.total_cpus_count())
        if hostcpu_num < 8:
            test.cancel("The host should have at least 8 CPUs for this test.")

        # online all host cpus
        online_cpus = cpuutil.cpu_online_list()
        for x in range(1, hostcpu_num):
            if x not in online_cpus and cpuutil.online(x):
                test.fail("failed to online cpu{}".format(x))

        # use vcpu cpuset or/and cputune cpuset to define xml
        del vmxml.cputune
        del vmxml.vcpus
        del vmxml.placement
        vmxml.vcpu = int(maxvcpu)
        vmxml.current_vcpu = current_vcpu

        # Remove cpu topology to avoid a mismatch with the vcpu count
        if vmxml.get_cpu_topology():
            new_cpu = vmxml.cpu
            del new_cpu.topology
            vmxml.cpu = new_cpu

        # config vcpu cpuset for cpuset range test
        num = 1 if not status_error else 0
        cpuset_new = "0-{},^{}".format(hostcpu_num-num, cpuset_mask)
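        # e.g. with 8 host cpus and cpuset_mask=4 this yields "0-7,^4" in the
        # positive case and the out-of-range "0-8,^4" in the negative case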
        if (config_xml == "vcpu" and check.endswith("range_cpuset")):
            vcpu_cpuset = cpuset_new
        vmxml.cpuset = vcpu_cpuset

        if vcpu_placement:
            vmxml.placement = vcpu_placement

            # Remove numatune node since it will be automatically set
            # under 'auto' state
            if vcpu_placement == 'auto':
                vmxml.xmltreefile.remove_by_xpath('/numatune', remove_all=True)
                vmxml.xmltreefile.write()

        if config_xml == "cputune":
            cputune = vm_xml.VMCPUTuneXML()
            if check.endswith("range_cpuset"):
                cputune_cpuset = cpuset_new
            if check.endswith("duplicate_vcpu"):
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': "2"}, {'vcpu': vcpu, 'cpuset': "3"}]
            else:
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': cputune_cpuset}]
            vmxml.cputune = cputune

        logging.debug(vmxml)
        if status_error and define_fail:
            result_to_check = virsh.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

        # test vcpu cpuset in offline/online host cpu scenario
        if check.endswith("offline_hostcpu"):
            for x in offline_hostcpus.split(','):
                if cpuutil.offline(x):
                    test.fail("failed to offline cpu{}".format(x))
                logging.debug("offline host cpu {}".format(x))

        # start the vm
        if status_error and start_fail:
            result_to_check = virsh.start(vm_name, debug=True)

        if (not status_error) or runtime_fail:
            vm.start()
            vm.wait_for_login(timeout=start_timeout).close()

            # test vcpu cpuset in offline/online host cpu scenario
            if check.endswith("offline_hostcpu") and not status_error:
                # online host cpu
                if cpuutil.online(cputune_cpuset):
                    test.fail("failed to online cpu{}".format(cputune_cpuset))

            # run virsh vcpupin to config vcpu affinity
            if check.startswith("cputune") and (not config_xml):
                result_to_check = virsh.vcpupin(vm_name, vcpu, cputune_cpuset, vcpupin_option, debug=True)

            # hotplug vcpu test scenario
            if hotplug_vcpu:
                virsh.setvcpus(vm_name, setvcpus_count, setvcpus_option, debug=True, ignore_status=False)

            libvirtd_restart = False
            while True:
                if check == "vcpu_placement":
                    check_vcpu_placement(test, params)
                elif not status_error:
                    check_vcpu_affinity()
                if libvirtd_restart:
                    break
                # restart libvirtd and check vcpu affinity again
                utils_libvirtd.Libvirtd().restart()
                libvirtd_restart = True

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()

        # recover the host cpu env
        for x in range(1, hostcpu_num):
            cpuutil.online(x)
Example #9
def run(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    """

    def affinity_from_vcpuinfo(vm_name, vcpu):
        """
        Return the given vcpu's affinity list parsed from
        virsh vcpuinfo output

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :return: list of per-cpu affinity flags ('y' or '-')
        """

        output = virsh.vcpuinfo(vm_name).stdout.rstrip()
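        # Sample 'virsh vcpuinfo' affinity line matched below ('y' = allowed):
        #   CPU Affinity:   yy--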
        affinity = re.findall('CPU Affinity: +[-y]+', output)
        total_affinity = affinity[int(vcpu)].split()[-1].strip()
        actual_affinity = list(total_affinity)
        return actual_affinity

    def affinity_from_vcpupin(vm_name, vcpu):
        """
        Return the given vcpu's affinity list parsed from virsh vcpupin output

        :param vm_name: VM Name
        :param vcpu: vcpu number
        :return: list of per-cpu affinity flags
        """
        total_cpu = process.run("ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l", shell=True).stdout.strip()
        vcpus_affinity = {}
        output = virsh.vcpupin(vm_name).stdout
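        # 'virsh vcpupin' output begins with a two-line header, roughly:
        #   VCPU: CPU Affinity
        #   ----------------------------------
        #      0: 0-3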
        for item in output.split('\n')[2:-2]:
            vcpus_affinity[item.split(':')[0].strip()] = item.split(':')[1].strip()
        return utils_test.libvirt.cpus_string_to_affinity_list(
            vcpus_affinity[str(vcpu)], int(total_cpu))

    def check_vcpupin(vm_name, vcpu, cpu_list, pid, vcpu_pid):
        """
        Check the actual against the expected affinity of the given vcpu
        and fail the test if they do not match

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :param cpu_list: cpu list for the affinity
        :param pid: VM pid
        :param vcpu_pid: vcpu thread pid
        """

        total_cpu = process.run("ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l", shell=True).stdout.strip()
        logging.debug("Debug: cpulist %s", cpu_list)
        expected_output = utils_test.libvirt.cpus_string_to_affinity_list(
            cpu_list,
            int(total_cpu))
        logging.debug("Expected affinity: %s", expected_output)

        # Check for affinity value from vcpuinfo output
        actual_output = affinity_from_vcpuinfo(vm_name, vcpu)
        logging.debug("Actual affinity in vcpuinfo output: %s", actual_output)
        if expected_output == actual_output:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            test.fail("Cpu pinning details not updated properly in"
                      " virsh vcpuinfo command output")

        # Check for affinity value from vcpupin output
        actual_output_vcpupin = affinity_from_vcpupin(vm_name, vcpu)
        logging.debug("Actual affinity in vcpupin output: %s", actual_output_vcpupin)
        if expected_output == actual_output_vcpupin:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            test.fail("Cpu pinning details not updated properly in"
                      " virsh vcpupin command output")

        if pid is None:
            return
        # Get the actual cpu affinity value in the proc entry
        output = utils_test.libvirt.cpu_allowed_list_by_task(pid, vcpu_pid)
        actual_output_proc = utils_test.libvirt.cpus_string_to_affinity_list(
            output,
            int(total_cpu))
        logging.debug("Actual affinity in guest proc: %s", actual_output_proc)
        if expected_output == actual_output_proc:
            logging.info("successfully pinned vcpu: %s --> cpu: %s"
                         " in respective proc entry", vcpu, cpu_list)
        else:
            test.fail("Cpu pinning details are not "
                      "updated properly in /proc/"
                      "%s/task/%s/status" % (pid, vcpu_pid))

    def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
        """
        Run the vcpupin command and then check the result.
        """
        if vm_ref == "name":
            vm_ref = vm.name
        elif vm_ref == "uuid":
            vm_ref = vm.get_uuid()
        # Execute virsh vcpupin command.
        cmdResult = virsh.vcpupin(vm_ref, vcpu, cpu_list, options, debug=True)
        if cmdResult.exit_status:
            if not status_error:
                # Command fail and it is positive case.
                test.fail(cmdResult)
            else:
                # Command fail and it is negative case.
                return
        else:
            if status_error:
                # Command success and it is negative case.
                test.fail(cmdResult)
            else:
                # Command success and it is positive case.
                # "--config" will take effect after VM destroyed.
                pid = None
                vcpu_pid = None
                if options == "--config":
                    virsh.destroy(vm.name)
                else:
                    pid = vm.get_pid()
                    logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
                    vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    def offline_pin_and_check(vm, vcpu, cpu_list):
        """
        Edit domain xml to pin vcpu and check the result.
        """
        cputune = vm_xml.VMCPUTuneXML()
        cputune.vcpupins = [{'vcpu': str(vcpu), 'cpuset': cpu_list}]
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        vmxml.cputune = cputune
        vmxml.sync()
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)
        pid = vm.get_pid()
        vcpu_pid = vm.get_vcpus_pid()[vcpu]
        check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    if not virsh.has_help_command('vcpucount'):
        test.cancel("This version of libvirt doesn't"
                    " support this test")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    # Get the variables for vcpupin command.
    vm_ref = params.get("vcpupin_vm_ref", "name")
    options = params.get("vcpupin_options", "--current")
    cpu_list = params.get("vcpupin_cpu_list", "x")
    start_vm = ("yes" == params.get("start_vm", "yes"))
    vcpupin_initial = ("yes" == params.get("vcpupin_initial", "no"))

    # Get status of this case.
    status_error = ("yes" == params.get("status_error", "no"))

    # Edit domain xml to pin vcpus
    offline_pin = ("yes" == params.get("offline_pin", "no"))

    # Backup for recovery.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    if start_vm and vm.state() == "shut off":
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)

    # Get the guest vcpu count
    if offline_pin:
        vcpucount_option = "--config --active"
    else:
        vcpucount_option = "--live --active"
    guest_vcpu_count = virsh.vcpucount(vm_name,
                                       vcpucount_option).stdout.strip()

    # Find the list of online host cpus
    cpus_list = list(map(str, cpuutils.cpu_online_list()))
    logging.info("Active cpus in host are %s", cpus_list)

    try:
        # Control multi domain vcpu affinity
        multi_dom = ("yes" == params.get("multi_dom_pin", "no"))
        vm2 = None
        # Before doing any vcpupin actions, let's check whether
        # the initial pinning state is fine
        if vcpupin_initial:
            pid = vm.get_pid()
            logging.debug("vcpus_pid: %s vcpu count: %s", vm.get_vcpus_pid(), guest_vcpu_count)
            for vcpu in range(int(guest_vcpu_count)):
                vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, str(','.join(cpus_list)), pid, vcpu_pid)
            return

        if multi_dom:
            vm_names = params.get("vms").split()
            if len(vm_names) > 1:
                vm2 = env.get_vm(vm_names[1])
            else:
                test.error("Need more than one domain")
            if not vm2:
                test.cancel("VM %s not found" % vm_names[1])
            vm2.destroy()
            vm2xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm2.name)
            vm2xml_backup = vm2xml.copy()
            # Make sure vm2 has the same vcpu count as vm
            vm2xml.set_vm_vcpus(vm2.name, int(guest_vcpu_count), guest_vcpu_count)
            if start_vm:
                vm2.start()

        # Run cases when guest is shutoff.
        if not offline_pin:
            if vm.is_dead() and not start_vm:
                run_and_check_vcpupin(vm, vm_ref, 0, 0, "")
                return
        # Get the host cpu count
        host_online_cpu_count = len(cpus_list)
        online_cpu_max = max(map(int, cpus_list))
        host_cpu_count = cpuutils.total_cpus_count()
        cpu_max = int(host_cpu_count) - 1
        if (host_online_cpu_count < 2) and (not cpu_list == "x"):
            test.cancel("Need more cpus on the host for "
                        "cpu_list=%s; the current number of online "
                        "host cpus is %s."
                        % (cpu_list, host_online_cpu_count))

        # Run test case
        for vcpu in range(int(guest_vcpu_count)):
            if cpu_list == "x":
                for cpu in cpus_list:
                    left_cpus = "0-%s,^%s" % (online_cpu_max, cpu)
                    if offline_pin:
                        offline_pin_and_check(vm, vcpu, str(cpu))
                        if multi_dom:
                            offline_pin_and_check(vm2, vcpu, left_cpus)
                    else:
                        run_and_check_vcpupin(vm, vm_ref, vcpu, str(cpu),
                                              options)
                        if multi_dom:
                            run_and_check_vcpupin(vm2, "name", vcpu, left_cpus,
                                                  options)
            else:
                if cpu_list == "x-y":
                    cpus = "0-%s" % online_cpu_max
                elif cpu_list == "x,y":
                    cpus = ','.join(random.sample(cpus_list, 2))
                    logging.info(cpus)
                elif cpu_list == "x-y,^z":
                    cpus = "0-%s,^%s" % (online_cpu_max, online_cpu_max)
                elif cpu_list == "r":
                    cpus = "r"
                elif cpu_list == "-1":
                    cpus = "-1"
                elif cpu_list == "out_of_max":
                    cpus = str(cpu_max + 1)
                else:
                    test.cancel("Cpu_list=%s is not recognized."
                                % cpu_list)
                if offline_pin:
                    offline_pin_and_check(vm, vcpu, cpus)
                else:
                    run_and_check_vcpupin(vm, vm_ref, vcpu, cpus, options)
    finally:
        # Recover xml of vm.
        vmxml_backup.sync()
        if vm2:
            vm2xml_backup.sync()
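
The affinity checks in this example hinge on expanding a cpuset string into the same y/- list that virsh vcpuinfo prints. Below is a minimal standalone sketch of that expansion, with assumed names and sample values; it is not the real utils_test.libvirt.cpus_string_to_affinity_list implementation.

def cpuset_to_affinity(cpuset, total_cpu):
    """Expand a cpuset string such as '0-5,^3' into a y/- affinity list."""
    affinity = ['-'] * total_cpu
    for chunk in cpuset.split(','):
        if chunk == 'r':                      # 'r' selects every cpu
            affinity = ['y'] * total_cpu
        elif chunk.startswith('^'):           # '^N' excludes cpu N
            affinity[int(chunk[1:])] = '-'
        elif '-' in chunk:                    # 'A-B' is an inclusive range
            start, end = map(int, chunk.split('-'))
            for i in range(start, end + 1):
                affinity[i] = 'y'
        else:                                 # a single cpu number
            affinity[int(chunk)] = 'y'
    return affinity

print(cpuset_to_affinity("0-5,^3", 8))  # ['y', 'y', 'y', '-', 'y', 'y', '-', '-']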
Ejemplo n.º 10
0
    def test_s390x_cpu_online(self):
        s390x = u"""vendor_id       : IBM/S390
# processors    : 2
bogomips per cpu: 2913.00
max thread id   : 0
features        : esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te sie
facilities      : 0 1 2 3 4 6 7 8 9 10 12 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 30 31 32 33 34 35 36 37 40 41 42 43 44 45 46 47 48 49 50 51 52 57 73 74 75 76 77
cache0          : level=1 type=Data scope=Private size=96K line_size=256 associativity=6
cache1          : level=1 type=Instruction scope=Private size=64K line_size=256 associativity=4
cache2          : level=2 type=Data scope=Private size=1024K line_size=256 associativity=8
cache3          : level=2 type=Instruction scope=Private size=1024K line_size=256 associativity=8
cache4          : level=3 type=Unified scope=Shared size=49152K line_size=256 associativity=12
cache5          : level=4 type=Unified scope=Shared size=393216K line_size=256 associativity=24
processor 0: version = FF,  identification = 32C047,  machine = 2827
processor 1: version = FF,  identification = 32C047,  machine = 2827

cpu number      : 0
cpu MHz dynamic : 5504
cpu MHz static  : 5504

cpu number      : 1
cpu MHz dynamic : 5504
cpu MHz static  : 5504

"""

        s390x_2 = u"""vendor_id       : IBM/S390
# processors    : 4
bogomips per cpu: 2913.00
max thread id   : 0
features        : esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te sie
facilities      : 0 1 2 3 4 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 30 31 32 33 34 35 36 37 40 41 42 43 44 45 46 47 48 49 50 51 52 57 64 65 66 67 68 69 70 71 72 73 75 76 77 78 131 132
cache0          : level=1 type=Data scope=Private size=96K line_size=256 associativity=6
cache1          : level=1 type=Instruction scope=Private size=64K line_size=256 associativity=4
cache2          : level=2 type=Data scope=Private size=1024K line_size=256 associativity=8
cache3          : level=2 type=Instruction scope=Private size=1024K line_size=256 associativity=8
cache4          : level=3 type=Unified scope=Shared size=49152K line_size=256 associativity=12
cache5          : level=4 type=Unified scope=Shared size=393216K line_size=256 associativity=24
processor 0: version = 00,  identification = 3FC047,  machine = 2827
processor 1: version = 00,  identification = 3FC047,  machine = 2827
processor 2: version = 00,  identification = 3FC047,  machine = 2827
processor 3: version = 00,  identification = 3FC047,  machine = 2827

cpu number      : 0
cpu MHz dynamic : 5504
cpu MHz static  : 5504

cpu number      : 1
cpu MHz dynamic : 5504
cpu MHz static  : 5504

cpu number      : 2
cpu MHz dynamic : 5504
cpu MHz static  : 5504

cpu number      : 3
cpu MHz dynamic : 5504
cpu MHz static  : 5504
"""

        with mock.patch('avocado.utils.cpu.platform.machine', return_value='s390x'):
            with mock.patch('avocado.utils.cpu.open',
                            return_value=self._get_file_mock(s390x)):
                self.assertEqual(len(cpu.cpu_online_list()), 2)
            with mock.patch('avocado.utils.cpu.open',
                            return_value=self._get_file_mock(s390x_2)):
                self.assertEqual(len(cpu.cpu_online_list()), 4)
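
The two assertions above boil down to counting per-CPU stanzas in the mocked /proc/cpuinfo content. A hedged standalone sketch of that counting idea follows; the helper name and sample text are assumptions, not the avocado.utils.cpu implementation.

def count_s390x_online(cpuinfo_text):
    """Count online CPUs on s390x by counting 'cpu number' stanzas."""
    return sum(1 for line in cpuinfo_text.splitlines()
               if line.startswith("cpu number"))

sample = "cpu number      : 0\ncpu number      : 1\n"
assert count_s390x_online(sample) == 2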
Ejemplo n.º 11
0
def run(test, params, env):
    """
    Test different hmi injections with guest

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def set_condn(action, recover=False):
        """
        Set/reset guest state/action
        :param action: Guest state change/action
        :param recover: whether to recover the given state, default: False
        """
        if not recover:
            if action == "pin_vcpu":
                for i in range(cur_vcpu):
                    virsh.vcpupin(vm_name,
                                  i,
                                  hmi_cpu,
                                  "--live",
                                  ignore_status=False,
                                  debug=True)
                    virsh.emulatorpin(vm_name,
                                      hmi_cpu,
                                      "live",
                                      ignore_status=False,
                                      debug=True)
            elif action == "filetrans":
                utils_test.run_file_transfer(test, params, env)
            elif action == "save":
                save_file = os.path.join(data_dir.get_tmp_dir(),
                                         vm_name + ".save")
                result = virsh.save(vm_name,
                                    save_file,
                                    ignore_status=True,
                                    debug=True)
                utils_test.libvirt.check_exit_status(result)
                time.sleep(10)
                if os.path.exists(save_file):
                    result = virsh.restore(save_file,
                                           ignore_status=True,
                                           debug=True)
                    utils_test.libvirt.check_exit_status(result)
                    os.remove(save_file)
            elif action == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                utils_test.libvirt.check_exit_status(result)
                time.sleep(10)
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                utils_test.libvirt.check_exit_status(result)
        return

    host_version = params.get("host_version")
    guest_version = params.get("guest_version", "")
    max_vcpu = int(params.get("ppchmi_vcpu_max", '1'))
    cur_vcpu = int(params.get("ppchmi_vcpu_cur", "1"))
    cores = int(params.get("ppchmi_cores", '1'))
    sockets = int(params.get("ppchmi_sockets", '1'))
    threads = int(params.get("ppchmi_threads", '1'))
    status_error = "yes" == params.get("status_error", "no")
    condition = params.get("condn", "")
    inject_code = params.get("inject_code", "")
    scom_base = params.get("scom_base", "")
    hmi_name = params.get("hmi_name", "")
    hmi_iterations = int(params.get("hmi_iterations", 1))

    if host_version not in cpu.get_cpu_arch():
        test.cancel("Unsupported Host cpu version")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    sm = SoftwareManager()
    if not sm.check_installed("opal-utils") and not sm.install("opal-utils"):
        test.cancel("opal-utils package install failed")
    cpus_list = cpu.cpu_online_list()
    cpu_idle_state = cpu.get_cpuidle_state()
    cpu.set_cpuidle_state()
    # Let's use the second available host cpu
    hmi_cpu = cpus_list[1]
    pir = int(
        open('/sys/devices/system/cpu/cpu%s/pir' % hmi_cpu).read().strip(), 16)
    if host_version == 'power9':
        coreid = (((pir) >> 2) & 0x3f)
        nodeid = (((pir) >> 8) & 0x7f) & 0xf
        hmi_scom_addr = hex(((coreid & 0x1f + 0x20) << 24)
                            | int(scom_base, 16))
    if host_version == 'power8':
        coreid = (((pir) >> 3) & 0xf)
        nodeid = (((pir) >> 7) & 0x3f)
        hmi_scom_addr = hex(((coreid & 0xf) << 24) | int(scom_base, 16))
    hmi_cmd = "putscom -c %s %s %s" % (nodeid, hmi_scom_addr, inject_code)

    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    # Destroy the vm
    vm.destroy()
    try:
        session = None
        bgt = None
        libvirt_xml.VMXML.set_vm_vcpus(vm_name,
                                       max_vcpu,
                                       cur_vcpu,
                                       sockets=sockets,
                                       cores=cores,
                                       threads=threads,
                                       add_topology=True)
        if guest_version:
            libvirt_xml.VMXML.set_cpu_mode(vm_name, model=guest_version)
        vm.start()
        # Let's clear host and guest dmesg
        process.system("dmesg -C", verbose=False)
        session = vm.wait_for_login()
        session.cmd("dmesg -C")

        # Set condn
        if "vcpupin" in condition:
            set_condn("pin_vcpu")
        if "stress" in condition:
            utils_test.load_stress("stress_in_vms", params=params, vms=[vm])
        if "save" in condition:
            set_condn("save")
        if "suspend" in condition:
            set_condn("suspend")

        # hmi inject
        logging.debug("Injecting %s HMI on cpu %s", hmi_name, hmi_cpu)
        logging.debug("HMI Command: %s", hmi_cmd)
        process.run(hmi_cmd)

        # Check host and guest dmesg
        host_dmesg = process.system_output("dmesg -c", verbose=False).decode()
        guest_dmesg = session.cmd_output("dmesg")
        if "Unrecovered" in host_dmesg:
            test.fail("Unrecovered host hmi\n%s" % host_dmesg)
        else:
            logging.debug("Host dmesg: %s", host_dmesg)
        logging.debug("Guest dmesg: %s", guest_dmesg)
        if "save" in condition:
            set_condn("save")
        if "suspend" in condition:
            set_condn("suspend")
    finally:
        if "stress" in condition:
            utils_test.unload_stress("stress_in_vms", params=params, vms=[vm])
        if session:
            session.close()
        org_xml.sync()
        cpu.set_cpuidle_state(setstate=cpu_idle_state)
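
The PIR decoding above packs each field extraction into a single expression. The helper below restates the same bit arithmetic as a standalone sketch; the function name and the sample PIR value are assumptions for illustration only.

def decode_pir(pir, host_version):
    """Extract (coreid, nodeid) from a PIR value using the masks above."""
    if host_version == 'power9':
        coreid = (pir >> 2) & 0x3f
        nodeid = ((pir >> 8) & 0x7f) & 0xf
    elif host_version == 'power8':
        coreid = (pir >> 3) & 0xf
        nodeid = (pir >> 7) & 0x3f
    else:
        raise ValueError("unsupported host version: %s" % host_version)
    return coreid, nodeid

print(decode_pir(0x2010, 'power9'))  # (4, 0)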
Ejemplo n.º 12
0
def run(test, params, env):
    """
    Different vcpupin scenario tests
    1) prepare the guest with given topology, memory and if any devices
    2) Start and login to the guest, check for cpu, memory
    3) Do different combinations of vcpupin and in parallel run stress
       if given
    4) Do a optional step based on config
    5) Check guest and host functional

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def set_condition(vm_name, condn, reset=False, guestbt=None):
        """
        Set domain to given state or reset it.
        """
        bt = None
        if not reset:
            if condn == "avocadotest":
                bt = utils_test.run_avocado_bg(vm, params, test)
                if not bt:
                    test.cancel("guest stress failed to start")
                # Allow stress to start
                time.sleep(condn_sleep_sec)
                return bt
            elif condn == "stress":
                utils_test.load_stress("stress_in_vms", params=params, vms=[vm])
            elif condn in ["save", "managedsave"]:
                # No action
                pass
            elif condn == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, max_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                            'cur_config': current_vcpu, 'cur_live': max_vcpu,
                            'guest_live': max_vcpu}
                result = cpu.check_vcpu_value(vm, exp_vcpu,
                                              option="--live")
            elif condn == "host_smt":
                if cpuutil.get_cpu_vendor_name() == 'power9':
                    result = process.run("ppc64_cpu --smt=4", shell=True)
                else:
                    test.cancel("Host SMT changes not allowed during guest live")
            else:
                logging.debug("No operation for the domain")

        else:
            if condn == "save":
                save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
                result = virsh.save(vm_name, save_file,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif condn == "managedsave":
                result = virsh.managedsave(vm_name,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "avocadotest":
                guestbt.join(ignore_status=True)
            elif condn == "stress":
                utils_test.unload_stress("stress_in_vms", params=params, vms=[vm])
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, current_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': current_vcpu,
                            'cur_config': current_vcpu, 'cur_live': current_vcpu,
                            'guest_live': current_vcpu}
                result = cpu.check_vcpu_value(vm, exp_vcpu,
                                              option="--live")
            elif condn == "host_smt":
                result = process.run("ppc64_cpu --smt=2", shell=True)
                # Change back the host smt
                result = process.run("ppc64_cpu --smt=4", shell=True)
                # Workaround for a known cgroup issue after a cpu
                # hot(un)plug sequence
                root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
                machine_cpuset_paths = []
                if os.path.isdir(os.path.join(root_cpuset_path,
                                              "machine.slice")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine.slice"))
                if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine"))
                if not machine_cpuset_paths:
                    logging.warning("cgroup cpuset might not recover properly "
                                    "for guests after host smt changes, "
                                    "restore it manually")
                root_cpuset_cpus = os.path.join(root_cpuset_path, "cpuset.cpus")
                for path in machine_cpuset_paths:
                    machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                    # check if file content differs
                    cmd = "diff %s %s" % (root_cpuset_cpus,
                                          machine_cpuset_cpus)
                    if process.system(cmd, verbose=True, ignore_status=True):
                        cmd = "cp %s %s" % (root_cpuset_cpus,
                                            machine_cpuset_cpus)
                        process.system(cmd, verbose=True)

            else:
                logging.debug("No need recover the domain")
        return bt

    vm_name = params.get("main_vm")
    max_vcpu = int(params.get("max_vcpu", 2))
    current_vcpu = int(params.get("current_vcpu", 1))
    vm_cores = int(params.get("limit_vcpu_cores", 2))
    vm_threads = int(params.get("limit_vcpu_threads", 1))
    vm_sockets = int(params.get("limit_vcpu_sockets", 1))
    vm = env.get_vm(vm_name)
    condition = params.get("condn", "")
    condn_sleep_sec = int(params.get("condn_sleep_sec", 30))
    pintype = params.get("pintype", "random")
    emulatorpin = "yes" == params.get("emulatorpin", "no")
    config_pin = "yes" == params.get("config_pin", "no")
    iterations = int(params.get("itr", 1))
    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    fail = False
    # Destroy the vm
    vm.destroy()
    try:
        cpus_list = cpuutil.cpu_online_list()
        if len(cpus_list) < 2:
            test.cancel("Need minimum two online host cpus")
        # Set vcpu and topology
        libvirt_xml.VMXML.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
                                       vm_sockets, vm_cores, vm_threads)
        if config_pin:
            cpustats = {}
            result = virsh.emulatorpin(vm_name, cpus_list[-1], "config",
                                       debug=True)
            libvirt.check_exit_status(result)
            result = virsh.vcpupin(vm_name, "0", cpus_list[0], "--config",
                                   ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            test.fail("%s" % detail)

        cpucount = vm.get_cpu_count()
        if cpucount != current_vcpu:
            test.fail("Incorrect initial guest vcpu\nExpected:%s Actual:%s" %
                      (current_vcpu, cpucount))

        if config_pin:
            cpustats = cpu.get_cpustats(vm)
            if not cpustats:
                test.fail("cpu stats command failed to run")

            logging.debug("Check cpustats for emulatorpinned cpu")
            if cpustats[cpus_list[-1]][0] > 0:
                fail = True
                logging.error("Non zero vcputime even with no vcpu pinned")
            if cpustats[cpus_list[-1]][1] == 0:
                fail = True
                logging.error("emulatortime should be positive as it is pinned")

            logging.debug("Check cpustats for vcpupinned cpu")
            if cpustats[cpus_list[0]][0] == 0:
                fail = True
                logging.error("vcputime should be positive as vcpu it is pinned")
            if cpustats[cpus_list[0]][1] > 0:
                fail = True
                logging.error("Non zero emulatortime even with emulator unpinned")

            logging.debug("Check cpustats for non-pinned cpus")
            for index in cpus_list[1:-1]:
                if cpustats[index][2] > 0:
                    fail = True
                    logging.error("Non zero cputime even with no vcpu,emualtor pinned")

        if condition:
            condn_result = set_condition(vm_name, condition)

        # Action:
        for _ in range(iterations):
            if emulatorpin:
                # Use the last host cpu, which may go offline during host_smt
                hostcpu = cpus_list[-1]
                result = virsh.emulatorpin(vm_name, hostcpu, debug=True)
                libvirt.check_exit_status(result)
                cpustats = cpu.get_cpustats(vm, hostcpu)
                logging.debug("hostcpu:%s vcputime: %s emulatortime: "
                              "%s cputime: %s", hostcpu, cpustats[hostcpu][0],
                              cpustats[hostcpu][1], cpustats[hostcpu][2])
            for vcpu in range(max_vcpu):
                if pintype == "random":
                    hostcpu = random.choice(cpus_list[:-1])
                if pintype == "sequential":
                    hostcpu = cpus_list[vcpu % len(cpus_list[:-1])]
                result = virsh.vcpupin(vm_name, vcpu, hostcpu,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                cpustats = cpu.get_cpustats(vm, hostcpu)
                logging.debug("hostcpu:%s vcputime: %s emulatortime: "
                              "%s cputime: %s", hostcpu, cpustats[hostcpu][0],
                              cpustats[hostcpu][1], cpustats[hostcpu][2])
                if config_pin:
                    if cpustats[hostcpu][0] == 0:
                        fail = True
                        logging.error("vcputime should be positive as vcpu is pinned")
                    if cpustats[hostcpu][1] > 0:
                        fail = True
                        logging.error("Non zero emulatortime even with emulator unpinned")
        if condition:
            set_condition(vm_name, condition, reset=True, guestbt=condn_result)

        # Check for guest functional
        cpucount = vm.get_cpu_count()
        if cpucount != current_vcpu:
            test.fail("Incorrect final guest vcpu\nExpected:%s Actual:%s" %
                      (current_vcpu, cpucount))
    finally:
        if fail:
            test.fail("Consult previous errors")
        org_xml.sync()
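
The cpustats checks in this example assume the shape cpu.get_cpustats() returns, i.e. cpustats[host_cpu] == [vcputime, emulatortime, cputime]. A minimal sketch of the same sanity check as a reusable helper, with illustrative names:

def pinned_cpu_errors(cpustats, vcpu_cpu, emulator_cpu):
    """Collect complaints about cputime accounting on the pinned cpus."""
    errors = []
    if cpustats[vcpu_cpu][0] == 0:
        errors.append("vcputime should be positive on the vcpu-pinned cpu")
    if cpustats[vcpu_cpu][1] > 0:
        errors.append("emulatortime should be zero on the vcpu-pinned cpu")
    if cpustats[emulator_cpu][1] == 0:
        errors.append("emulatortime should be positive on the emulator-pinned cpu")
    if cpustats[emulator_cpu][0] > 0:
        errors.append("vcputime should be zero on the emulator-pinned cpu")
    return errors

# e.g. pinned_cpu_errors(cpu.get_cpustats(vm), cpus_list[0], cpus_list[-1])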
Ejemplo n.º 13
0
def run(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    """

    def affinity_from_vcpuinfo(vm_name, vcpu):
        """
        This function returns a list of the vcpu's affinity from
        virsh vcpuinfo output

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        """

        output = virsh.vcpuinfo(vm_name).stdout.rstrip()
        affinity = re.findall('CPU Affinity: +[-y]+', output)
        total_affinity = affinity[int(vcpu)].split()[-1].strip()
        actual_affinity = list(total_affinity)
        return actual_affinity
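
    # Illustrative note (sample output assumed): `virsh vcpuinfo` prints one
    # line per vcpu such as "CPU Affinity:   yy--y---"; the regex above
    # collects those lines, and list() then splits the y/- string into the
    # per-cpu affinity flags.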

    def affinity_from_vcpupin(vm_name, vcpu):
        """
        This function returns a list of the vcpu's affinity from
        virsh vcpupin output

        :param vm_name: VM Name
        :param vcpu: vcpu number for which the affinity is required
        :return: list of affinity values for the vcpu
        """
        total_cpu = process.run("ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l", shell=True).stdout_text.strip()
        vcpus_affinity = {}
        output = virsh.vcpupin(vm_name).stdout
        for item in output.split('\n')[2:-2]:
            item = item.strip()
            split_key = ' '
            if ':' in item:
                split_key = ':'
            vcpus_affinity[item.split(split_key)[0].strip()] = item.split(split_key)[-1].strip()
        return utils_test.libvirt.cpus_string_to_affinity_list(
            vcpus_affinity[str(vcpu)], int(total_cpu))

    def check_vcpupin(vm_name, vcpu, cpu_list, pid, vcpu_pid):
        """
        This function checks the actual and the expected affinity of a given
        vcpu and raises a test failure if they do not match

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :param cpu_list: cpu list for the expected affinity
        :param pid: VM pid
        :param vcpu_pid: VM vcpu thread pid
        """

        total_cpu = process.run("ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l", shell=True).stdout_text.strip()
        logging.debug("Debug: cpulist %s", cpu_list)
        expected_output = utils_test.libvirt.cpus_string_to_affinity_list(
            cpu_list,
            int(total_cpu))
        logging.debug("Expected affinity: %s", expected_output)

        # Check for affinity value from vcpuinfo output
        actual_output = affinity_from_vcpuinfo(vm_name, vcpu)
        logging.debug("Actual affinity in vcpuinfo output: %s", actual_output)
        if expected_output == actual_output:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            test.fail("Cpu pinning details not updated properly in"
                      " virsh vcpuinfo command output")

        # Check for affinity value from vcpupin output
        actual_output_vcpupin = affinity_from_vcpupin(vm_name, vcpu)
        logging.debug("Actual affinity in vcpupin output: %s", actual_output_vcpupin)
        if expected_output == actual_output_vcpupin:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            test.fail("Cpu pinning details not updated properly in"
                      " virsh vcpupin command output")

        if pid is None:
            return
        # Get the actual cpu affinity value in the proc entry
        output = utils_test.libvirt.cpu_allowed_list_by_task(pid, vcpu_pid)
        actual_output_proc = utils_test.libvirt.cpus_string_to_affinity_list(
            output,
            int(total_cpu))
        logging.debug("Actual affinity in guest proc: %s", actual_output_proc)
        if expected_output == actual_output_proc:
            logging.info("successfully pinned vcpu: %s --> cpu: %s"
                         " in respective proc entry", vcpu, cpu_list)
        else:
            test.fail("Cpu pinning details are not "
                      "updated properly in /proc/"
                      "%s/task/%s/status" % (pid, vcpu_pid))

    def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
        """
        Run the vcpupin command and then check the result.
        """
        if vm_ref == "name":
            vm_ref = vm.name
        elif vm_ref == "uuid":
            vm_ref = vm.get_uuid()
        # Execute virsh vcpupin command.
        cmdResult = virsh.vcpupin(vm_ref, vcpu, cpu_list, options, debug=True)
        if cmdResult.exit_status:
            if not status_error:
                # Command failed and it is a positive case.
                test.fail(cmdResult)
            else:
                # Command failed and it is a negative case.
                return
        else:
            if status_error:
                # Command succeeded and it is a negative case.
                test.fail(cmdResult)
            else:
                # Command succeeded and it is a positive case.
                # "--config" will take effect after the VM is destroyed.
                pid = None
                vcpu_pid = None
                if options == "--config":
                    virsh.destroy(vm.name)
                else:
                    pid = vm.get_pid()
                    logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
                    vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    def offline_pin_and_check(vm, vcpu, cpu_list):
        """
        Edit domain xml to pin vcpu and check the result.
        """
        cputune = vm_xml.VMCPUTuneXML()
        cputune.vcpupins = [{'vcpu': str(vcpu), 'cpuset': cpu_list}]
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        vmxml.cputune = cputune
        vmxml.sync()
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)
        pid = vm.get_pid()
        vcpu_pid = vm.get_vcpus_pid()[vcpu]
        check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    if not virsh.has_help_command('vcpucount'):
        test.cancel("This version of libvirt doesn't"
                    " support this test")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    # Get the variables for vcpupin command.
    vm_ref = params.get("vcpupin_vm_ref", "name")
    options = params.get("vcpupin_options", "--current")
    cpu_list = params.get("vcpupin_cpu_list", "x")
    cpu_itrs = int(params.get("vcpupin_cpu_itrs", "20"))
    start_vm = ("yes" == params.get("start_vm", "yes"))
    vcpupin_initial = ("yes" == params.get("vcpupin_initial", "no"))

    # Get status of this case.
    status_error = ("yes" == params.get("status_error", "no"))

    # Edit domain xml to pin vcpus
    offline_pin = ("yes" == params.get("offline_pin", "no"))

    # Backup for recovery.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    if start_vm and vm.state() == "shut off":
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)

    # Get the guest vcpu count
    if offline_pin:
        vcpucount_option = "--config --active"
    else:
        vcpucount_option = "--live --active"
    guest_vcpu_count = virsh.vcpucount(vm_name,
                                       vcpucount_option).stdout.strip()

    # Find the list of online host cpus
    cpus_list = list(map(str, cpuutils.cpu_online_list()))
    logging.info("Active cpus in host are %s", cpus_list)

    # If cpus_list has too many cpus, only test the cpus at the
    # head, middle and tail of the list
    length = len(cpus_list)
    if length > 30:
        cpus_list = cpus_list[:10] + \
                    cpus_list[length//2 - 5: length//2 + 5] + \
                    cpus_list[length - 10:]
        logging.info('Will run test on cpus: %s', cpus_list)

    try:
        # Control multi domain vcpu affinity
        multi_dom = ("yes" == params.get("multi_dom_pin", "no"))
        vm2 = None
        # Before doing any vcpupin actions, let's check whether
        # the initial pinning state is fine
        if vcpupin_initial:
            pid = vm.get_pid()
            logging.debug("vcpus_pid: %s vcpu count: %s", vm.get_vcpus_pid(), guest_vcpu_count)
            for vcpu in range(int(guest_vcpu_count)):
                vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, str(','.join(list(map(str, cpuutils.cpu_online_list())))), pid, vcpu_pid)
            return

        if multi_dom:
            vm_names = params.get("vms").split()
            if len(vm_names) > 1:
                vm2 = env.get_vm(vm_names[1])
            else:
                test.error("Need more than one domain")
            if not vm2:
                test.cancel("VM %s not found" % vm_names[1])
            vm2.destroy()
            vm2xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm2.name)
            vm2xml_backup = vm2xml.copy()
            # Make sure vm2 has the same vcpu count as vm
            vm2xml.set_vm_vcpus(vm2.name, int(guest_vcpu_count), guest_vcpu_count)
            if start_vm:
                vm2.start()

        # Run cases when guest is shutoff.
        if not offline_pin:
            if vm.is_dead() and not start_vm:
                run_and_check_vcpupin(vm, vm_ref, 0, 0, "")
                return
        # Get the host cpu count
        host_online_cpu_count = len(cpus_list)
        online_cpu_max = max(map(int, cpus_list))
        host_cpu_count = cpuutils.total_cpus_count()
        cpu_max = int(host_cpu_count) - 1
        if (host_online_cpu_count < 2) and (not cpu_list == "x"):
            test.cancel("Need more cpus on the host for "
                        "cpu_list=%s; the current number of online "
                        "host cpus is %s."
                        % (cpu_list, host_online_cpu_count))

        # Run test case
        for vcpu in range(int(guest_vcpu_count)):
            if cpu_list == "x":
                # Cap the pinning iterations: with many guest vcpus and
                # host cpus, iterating over every combination could make
                # the test run for a very long time, so recalculate
                # cpu_itrs against a threshold of 2 * cpu_itrs^2 total
                # iterations, with a floor of 2.
                if (int(guest_vcpu_count) * host_online_cpu_count) > (2 * cpu_itrs * cpu_itrs):
                    if int(guest_vcpu_count) > 2 * cpu_itrs:
                        cpu_itrs = (2 * cpu_itrs * cpu_itrs) // int(guest_vcpu_count)
                        if cpu_itrs == 0:
                            cpu_itrs = 2
                for _ in range(cpu_itrs):
                    cpu = random.choice(cpus_list)
                    left_cpus = "0-%s,^%s" % (online_cpu_max, cpu)
                    if offline_pin:
                        offline_pin_and_check(vm, vcpu, str(cpu))
                        if multi_dom:
                            offline_pin_and_check(vm2, vcpu, left_cpus)
                    else:
                        run_and_check_vcpupin(vm, vm_ref, vcpu, str(cpu),
                                              options)
                        if multi_dom:
                            run_and_check_vcpupin(vm2, "name", vcpu, left_cpus,
                                                  options)
            else:
                if cpu_list == "x-y":
                    cpus = "0-%s" % online_cpu_max
                elif cpu_list == "x,y":
                    cpus = ','.join(random.sample(cpus_list, 2))
                    logging.info(cpus)
                elif cpu_list == "x-y,^z":
                    cpus = "0-%s,^%s" % (online_cpu_max, online_cpu_max)
                elif cpu_list == "r":
                    cpus = "r"
                elif cpu_list == "-1":
                    cpus = "-1"
                elif cpu_list == "out_of_max":
                    cpus = str(cpu_max + 1)
                else:
                    test.cancel("Cpu_list=%s is not recognized."
                                % cpu_list)
                if offline_pin:
                    offline_pin_and_check(vm, vcpu, cpus)
                else:
                    run_and_check_vcpupin(vm, vm_ref, vcpu, cpus, options)
    finally:
        # Recover xml of vm.
        vmxml_backup.sync()
        if vm2:
            vm2xml_backup.sync()
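
Compared with the earlier vcpupin example, this variant bounds the runtime in two ways: it trims the host cpu list to its head, middle and tail, and it caps the random pinning iterations. A standalone sketch of the trimming step, under assumed names:

def trim_cpu_list(cpus_list, limit=30):
    """Keep the first, middle and last 10 cpus when the list is long."""
    n = len(cpus_list)
    if n <= limit:
        return cpus_list
    return (cpus_list[:10] +
            cpus_list[n // 2 - 5: n // 2 + 5] +
            cpus_list[n - 10:])

print(len(trim_cpu_list([str(i) for i in range(64)])))  # 30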
Ejemplo n.º 14
0
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-ga if needed.
    2. Checking for vcpu numbers in vcpucount, vcpuinfo, domain xml,
       vcpupin and inside domain.
    3. Plug vcpu for the domain.
    4. Repeat step 2 to check again.
    5. Control domain(save, managedsave, s3, s4, etc.).
    6. Repeat step 2 to check again.
    7. Recover domain(restore, wakeup, etc.).
    8. Repeat step 2 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 2 to check again.
    11. Repeat step 5 to control domain(As BZ#1088216 not fix, skip
        save/managedsave related actions).
    12. Repeat step 2 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 2 to check again.
    15. Recover test environment.
    """

    def manipulate_domain(vm_name, vm_operation, recover=False):
        """
        Operate domain to given state or recover it.

        :param vm_name: Name of the VM domain
        :param vm_operation: Operation to be performed on the VM domain,
                             like save, managedsave, suspend
        :param recover: flag to inform whether to set or reset
                        vm_operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
        if not recover:
            if vm_operation == "save":
                save_option = ""
                result = virsh.save(vm_name, save_file, save_option,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "managedsave":
                managedsave_option = ""
                result = virsh.managedsave(vm_name, managedsave_option,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s4":
                suspend_target = "disk"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # Wait domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
            elif vm_operation == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                vm.reboot()
                vm_uptime_init = vm.uptime()
            else:
                logging.debug("No operation for the domain")

        else:
            if vm_operation == "save":
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif vm_operation in ["managedsave", "s4"]:
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmwakeup(vm_name, ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                pass
            else:
                logging.debug("No need recover the domain")

    def online_new_vcpu(vm, vcpu_plug_num):
        """
        For Fedora/RHEL7 guests, udev cannot online hot-added CPUs
        automatically (refer to BZ#968811 for details), so enable them manually.

        :param vm: VM object
        :param vcpu_plug_num: Hotplugged vcpu count
        """
        cpu_is_online = []
        session = vm.wait_for_login()
        for i in range(1, int(vcpu_plug_num)):
            cpu_is_online.append(False)
            cpu = "/sys/devices/system/cpu/cpu%s/online" % i
            cmd_s, cmd_o = session.cmd_status_output("cat %s" % cpu)
            logging.debug("cmd exit status: %s, cmd output: %s", cmd_s, cmd_o)
            if cmd_s != 0:
                logging.error("Cannot find cpu %s in domain", i)
            else:
                if cmd_o.strip() == "0":
                    if session.cmd_status("echo 1 > %s" % cpu) == 0:
                        cpu_is_online[i-1] = True
                    else:
                        logging.error("Failed to enable cpu %s online", i)
                else:
                    cpu_is_online[i-1] = True
        session.close()
        return False not in cpu_is_online
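
    # Illustrative note: onlining a hot-added guest cpu amounts to running
    #   echo 1 > /sys/devices/system/cpu/cpu<N>/online
    # inside the guest, which is what the session.cmd_status() call above
    # does for each plugged cpu.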

    def check_setvcpus_result(cmd_result, expect_error):
        """
        Check command result.

        For setvcpus, skip unsupported operations (vcpu plug or unplug) by
        checking the command stderr.

        :param cmd_result: Command result
        :param expect_error: Whether to expect an error, True or False
        """
        if cmd_result.exit_status != 0:
            if expect_error:
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         cmd_result.stderr):
                test.cancel("guest <os> machine property may be too"
                            "  old to allow hotplug")

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure.  In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         cmd_result.stderr):
                test.cancel("Unsupport virsh setvcpu hotplug")

            # Maybe QEMU doesn't support vcpu unplug
            if re.search("Operation not supported: qemu didn't unplug the vCPUs",
                         cmd_result.stderr):
                test.cancel("Your qemu doesn't support vcpu unplug")

            # Qemu guest agent version could be too low
            if re.search("The command guest-get-vcpus has not been found",
                         cmd_result.stderr):
                err_msg = "Your agent version is too low: %s" % cmd_result.stderr
                logging.warning(err_msg)
                test.cancel(err_msg)

            # Attempting to enable more vCPUs in the guest than is currently
            # enabled in the guest but less than the maximum count for the VM
            if re.search("requested vcpu count is greater than the count of "
                         "enabled vcpus in the domain",
                         cmd_result.stderr):
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return

            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command: %s"
                      % cmd_result.stderr)
        else:
            if expect_error:
                test.fail("Expect fail but run successfully")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_uptime_init = 0
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = int(params.get("vcpu_max_num"))
    vcpu_current_num = int(params.get("vcpu_current_num"))
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = int(params.get("vcpu_plug_num"))
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = int(params.get("vcpu_unplug_num"))
    vcpu_max_timeout = int(params.get("vcpu_max_timeout", "480"))
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")
    with_stress = "yes" == params.get("run_stress", "no")
    iterations = int(params.get("test_itr", 1))
    topology_correction = "yes" == params.get("topology_correction", "no")
    # Init expect vcpu count values
    expect_vcpu_num = {'max_config': vcpu_max_num, 'max_live': vcpu_max_num,
                       'cur_config': vcpu_current_num,
                       'cur_live': vcpu_current_num,
                       'guest_live': vcpu_current_num}
    if check_after_plug_fail:
        expect_vcpu_num_bk = expect_vcpu_num.copy()
    # Init expect vcpu pin values
    expect_vcpupin = {}
    result_failed = 0

    # Init cpu-list for vcpupin
    host_cpu_count = os.sysconf('SC_NPROCESSORS_CONF')
    if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"):
        test.cancel("We need more cpus on host in this case for the cpu-list"
                    "=%s. But current number of cpu on host is %s."
                    % (pin_cpu_list, host_cpu_count))

    cpus_list = cpu_util.cpu_online_list()
    logging.debug("Active cpus in host are %s", cpus_list)

    cpu_seq_str = ""
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    if pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value get from cfg
        pass

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, vcpu_max_num, vcpu_current_num,
                           topology_correction=topology_correction)
        # Do not apply S3/S4 on power
        cpu_arch = platform.machine()
        if cpu_arch in ('x86_64', 'i386', 'i686'):
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()
        vm_uptime_init = vm.uptime()
        if with_stress:
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        for _ in range(iterations):
            if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num):
                logging.error("Expected vcpu check failed")
                result_failed += 1
            # plug vcpu
            if vcpu_plug:
                # Pin vcpu
                if pin_before_plug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True, debug=True)
                check_setvcpus_result(result, status_error)

                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_plug_num
                elif setvcpu_option == "--guest":
                    # setvcpus '--guest' only affects the vcpu number in the guest
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_plug_num
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                    if not status_error:
                        if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(vcpu_plug_num, vm),
                                                   vcpu_max_timeout, text="wait for vcpu online") or not online_new_vcpu(vm, vcpu_plug_num):
                            test.fail("Fail to enable new added cpu")

                # Pin vcpu
                if pin_after_plug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if status_error and check_after_plug_fail:
                    if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num_bk, {}, setvcpu_option):
                        logging.error("Expected vcpu check failed")
                        result_failed += 1

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                        logging.error("Expected vcpu check failed")
                        result_failed += 1

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming a domain from S4 may take a long time (QEMU
                    # bug), so wait for up to 10 mins and skip the remaining
                    # test steps if the domain does not resume successfully
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.warn("Skip remaining test steps as domain"
                                  " not resume in 10 mins: %s" % e)
                    # For vcpu hotplug/unplug without the '--config' flag,
                    # after suspending the domain to disk (shut off) and
                    # restarting it, the current live vcpu number recovers
                    # to its original value
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_plug_num
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1

            # Unplug vcpu
            # Since QEMU 2.2.0, all current vcpus are non-hotpluggable by
            # default when the VM starts, and vcpu 0 (id=1) must always be
            # present and non-hotpluggable, so these vcpus can't be
            # hot-unplugged directly. We can either hotplug more vcpus before
            # hot-unplugging, or set the 'hotpluggable' attribute to 'yes' on
            # every vcpu except vcpu 0, so that libvirt can find enough
            # hotpluggable vcpus to reach the desired target vcpu count. To
            # keep the prepare step simple, we choose to hotplug more vcpus.
            if vcpu_unplug:
                if setvcpu_option == "--live":
                    logging.info("Hotplug vcpu to the maximum count to make"
                                 "sure all these new plugged vcpus are "
                                 "hotunpluggable")
                    result = virsh.setvcpus(vm_name, vcpu_max_num, '--live',
                                            debug=True)
                    libvirt.check_exit_status(result)
                # Pin vcpu
                if pin_before_unplug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    # As the vcpu will be unplugged later, set
                    # expect_vcpupin to empty
                    expect_vcpupin = {}

                # The setvcpus operation is asynchronous: its return does not
                # mean the change is complete, so the guest vcpu count must be
                # polled. When unplugging from the maximum vcpu number down to
                # 1, keep observing until the count stops changing.
                result = virsh.setvcpus(vm_name, vcpu_unplug_num,
                                        setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True, debug=True)
                unsupport_str = utils_hotplug.vcpuhotunplug_unsupport_str()
                if unsupport_str and (unsupport_str in result.stderr):
                    test.cancel("Vcpu hotunplug is not supported in this host:"
                                "\n%s" % result.stderr)
                session = None
                try:
                    session = vm.wait_for_login()
                    cmd = "lscpu | grep \"^CPU(s):\""
                    operation = "setvcpus"
                    prev_output = -1
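                    # Poll lscpu until two consecutive reads match, i.e. the
                    # vcpu count reported inside the guest has stabilized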
                    while True:
                        ret, output = session.cmd_status_output(cmd)
                        if ret:
                            test.error("Run lscpu failed, output: %s" % output)
                        output = output.split(":")[-1].strip()

                        if int(prev_output) == int(output):
                            break
                        prev_output = output
                        time.sleep(5)
                    logging.debug("CPUs available from inside guest after %s - %s",
                                  operation, output)
                    if int(output) != vcpu_unplug_num:
                        test.fail("CPU %s failed as cpus are not "
                                  "reflected from inside guest" % operation)
                finally:
                    if session:
                        session.close()

                check_setvcpus_result(result, status_error)
                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_unplug_num
                elif setvcpu_option == "--guest":
                    # setvcpus '--guest' only affects the vcpu number inside the guest
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_unplug_num
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num

                # Pin vcpu
                if pin_after_unplug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                        logging.error("Expected vcpu check failed")
                        result_failed += 1

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming a domain from S4 may take a long time (QEMU
                    # bug); wait up to 10 mins and skip the remaining test
                    # steps if the domain does not resume successfully
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.warn("Skip remaining test steps as domain"
                                  " not resume in 10 mins: %s" % e)
                    # For vcpu hotplug/unplug without the '--config' flag,
                    # after suspending the domain to disk (shut off) and
                    # restarting it, the current live vcpu number recovers
                    # to its original value
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_unplug_num
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1
        if vm.uptime() < vm_uptime_init:
            test.fail("Unexpected VM reboot detected during the test")
    # Recover env
    finally:
        if need_mkswap:
            vm.cleanup_swap()
        if with_stress:
            bt.join(ignore_status=True)
        vm.destroy()
        backup_xml.sync()

    if not status_error:
        if result_failed > 0:
            test.fail("Test Failed")
Example #15
0
def __get_random_cpu():
    """
    Get a random online cpu
    """
    return random.choice(cpu.cpu_online_list())
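For context, a minimal usage sketch (an assumption, not taken from any of the
projects above) showing how such a helper pairs with the avocado/avocado-vt
utilities seen throughout these examples to pin a guest's vcpu 0 to a random
online host cpu:

import random

from avocado.utils import cpu
from virttest import virsh


def pin_vcpu0_to_random_cpu(vm_name):
    """
    Pin vcpu 0 of the given domain to a randomly chosen online host cpu.
    """
    # cpu_online_list() returns the ids of all online host cpus
    host_cpu = random.choice(cpu.cpu_online_list())
    return virsh.vcpupin(vm_name, 0, str(host_cpu),
                         ignore_status=True, debug=True)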
Example #16
0
def run(test, params, env):
    """
    Test vcpupin while numad is running
    """
    vcpu_placement = params.get("vcpu_placement")
    bug_url = params.get("bug_url", "")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
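    # e.g. the param 'memory_mode' becomes numa_memory['mode'], so the dict
    # ends up with keys like 'mode', 'placement' and 'nodeset'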
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd.start()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes_withmem
        logging.debug("host node list is %s", node_list)
        if len(node_list) < 2:
            test.cancel('Online NUMA nodes less than 2')
        node_a, node_b = min(node_list), max(node_list)
        numa_memory.update({'nodeset': '%d,%d' % (node_a, node_b)})
        # Start numad
        try:
            process.run("service numad start")
        except process.CmdError as e:
            # Bug 1218149 was closed as not a bug; work around it as
            # described in comment 12 of the bug
            logging.debug("start numad failed with %s", e)
            logging.debug("remove message queue of id 0 and try again")
            process.run("ipcrm msg 0", ignore_status=True)
            process.run("service numad start")

        # Start vm and do vcpupin
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vmxml.placement = vcpu_placement
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        vm.start()
        vm.wait_for_login()

        # Test vcpupin against the list of online host cpus
        cpus_list = list(map(str, cpuutils.cpu_online_list()))
        logging.info("active cpus in host are %s", cpus_list)
        for cpu in cpus_list:
            ret = virsh.vcpupin(vm_name,
                                0,
                                cpu,
                                debug=True,
                                ignore_status=True)
            if ret.exit_status:
                logging.error("related bug url: %s", bug_url)
                test.fail("vcpupin failed: %s" % ret.stderr)
            virsh.vcpuinfo(vm_name, debug=True)
    finally:
        process.run("service numad stop")
        libvirtd.restart()
        backup_xml.sync()
Example #17
0
def run(test, params, env):
    """
    Different vcpupin scenario tests
    1) Prepare the guest with the given topology, memory and devices, if any
    2) Start and log in to the guest, check cpu and memory
    3) Run different combinations of vcpupin, with stress running in
       parallel if given
    4) Do an optional step based on the config
    5) Check that guest and host are functional

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def set_condition(vm_name, condn, reset=False, guestbt=None):
        """
        Set domain to given state or reset it.
        """
        bt = None
        if not reset:
            if condn == "avocadotest":
                bt = utils_test.run_avocado_bg(vm, params, test)
                if not bt:
                    test.cancel("guest stress failed to start")
                # Allow stress to start
                time.sleep(condn_sleep_sec)
                return bt
            elif condn == "stress":
                utils_test.load_stress("stress_in_vms", params=params, vms=[vm])
            elif condn in ["save", "managedsave"]:
                # No action
                pass
            elif condn == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, max_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                            'cur_config': current_vcpu, 'cur_live': max_vcpu,
                            'guest_live': max_vcpu}
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option="--live")
            elif condn == "host_smt":
                if cpu.get_cpu_arch() == 'power9':
                    result = process.run("ppc64_cpu --smt=4", shell=True)
                else:
                    test.cancel("Host SMT changes not allowed during guest live")
            else:
                logging.debug("No operation for the domain")

        else:
            if condn == "save":
                save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
                result = virsh.save(vm_name, save_file,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif condn == "managedsave":
                result = virsh.managedsave(vm_name,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "avocadotest":
                guestbt.join(ignore_status=True)
            elif condn == "stress":
                utils_test.unload_stress("stress_in_vms", params=params, vms=[vm])
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, current_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': current_vcpu,
                            'cur_config': current_vcpu, 'cur_live': current_vcpu,
                            'guest_live': current_vcpu}
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option="--live")
            elif condn == "host_smt":
                result = process.run("ppc64_cpu --smt=2", shell=True)
                # Change back the host smt
                result = process.run("ppc64_cpu --smt=4", shell=True)
                # Work around a known cgroup issue seen after a cpu
                # hot(un)plug sequence
                root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
                machine_cpuset_paths = []
                if os.path.isdir(os.path.join(root_cpuset_path,
                                              "machine.slice")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine.slice"))
                if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine"))
                if not machine_cpuset_paths:
                    logging.warning("cgroup cpuset might not recover properly "
                                    "for guests after host smt changes, "
                                    "restore it manually")
                root_cpuset_cpus = os.path.join(root_cpuset_path, "cpuset.cpus")
                for path in machine_cpuset_paths:
                    machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                    # check if file content differs
                    cmd = "diff %s %s" % (root_cpuset_cpus,
                                          machine_cpuset_cpus)
                    if process.system(cmd, verbose=True, ignore_status=True):
                        cmd = "cp %s %s" % (root_cpuset_cpus,
                                            machine_cpuset_cpus)
                        process.system(cmd, verbose=True)

            else:
                logging.debug("No need recover the domain")
        return bt

    vm_name = params.get("main_vm")
    max_vcpu = int(params.get("max_vcpu", 2))
    current_vcpu = int(params.get("current_vcpu", 1))
    vm_cores = int(params.get("limit_vcpu_cores", 2))
    vm_threads = int(params.get("limit_vcpu_threads", 1))
    vm_sockets = int(params.get("limit_vcpu_sockets", 1))
    vm = env.get_vm(vm_name)
    condition = params.get("condn", "")
    condn_sleep_sec = int(params.get("condn_sleep_sec", 30))
    pintype = params.get("pintype", "random")
    emulatorpin = "yes" == params.get("emulatorpin", "no")
    config_pin = "yes" == params.get("config_pin", "no")
    iterations = int(params.get("itr", 1))
    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    fail = False
    # Destroy the vm
    vm.destroy()
    try:
        cpus_list = cpu.cpu_online_list()
        if len(cpus_list) < 2:
            test.cancel("Need minimum two online host cpus")
        # Set vcpu and topology
        libvirt_xml.VMXML.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
                                       vm_sockets, vm_cores, vm_threads)
        if config_pin:
            cpustats = {}
            result = virsh.emulatorpin(vm_name, cpus_list[-1], "config",
                                       debug=True)
            libvirt.check_exit_status(result)
            result = virsh.vcpupin(vm_name, "0", cpus_list[0], "--config",
                                   ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            test.fail("%s" % detail)

        cpucount = vm.get_cpu_count()
        if cpucount != current_vcpu:
            test.fail("Incorrect initial guest vcpu\nExpected:%s Actual:%s" %
                      (cpucount, current_vcpu))

        if config_pin:
            cpustats = utils_hotplug.get_cpustats(vm)
            if not cpustats:
                test.fail("cpu stats command failed to run")

            logging.debug("Check cpustats for emulatorpinned cpu")
            if cpustats[cpus_list[-1]][0] > 0:
                fail = True
                logging.error("Non zero vcputime even with no vcpu pinned")
            if cpustats[cpus_list[-1]][1] == 0:
                fail = True
                logging.error("emulatortime should be positive as it is pinned")

            logging.debug("Check cpustats for vcpupinned cpu")
            if cpustats[cpus_list[0]][0] == 0:
                fail = True
                logging.error("vcputime should be positive as vcpu it is pinned")
            if cpustats[cpus_list[0]][1] > 0:
                fail = True
                logging.error("Non zero emulatortime even with emulator unpinned")

            logging.debug("Check cpustats for non-pinned cpus")
            for index in cpus_list[1:-1]:
                if cpustats[index][2] > 0:
                    fail = True
                    logging.error("Non zero cputime even with no vcpu,emualtor pinned")

        if condition:
            condn_result = set_condition(vm_name, condition)

        # Action:
        for _ in range(iterations):
            if emulatorpin:
                # Pin the emulator to the last online cpu, which is the one
                # that may go offline during the host_smt condition
                hostcpu = cpus_list[-1]
                result = virsh.emulatorpin(vm_name, hostcpu, debug=True)
                libvirt.check_exit_status(result)
                cpustats = utils_hotplug.get_cpustats(vm, hostcpu)
                logging.debug("hostcpu:%s vcputime: %s emulatortime: "
                              "%s cputime: %s", hostcpu, cpustats[hostcpu][0],
                              cpustats[hostcpu][1], cpustats[hostcpu][2])
            for vcpu in range(max_vcpu):
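                # Choose the target host cpu per pintype: random over all
                # online cpus except the last one (kept for emulatorpin), or
                # a sequential round-robin over the same set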
                if pintype == "random":
                    hostcpu = random.choice(cpus_list[:-1])
                if pintype == "sequential":
                    hostcpu = cpus_list[vcpu % len(cpus_list[:-1])]
                result = virsh.vcpupin(vm_name, vcpu, hostcpu,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                cpustats = utils_hotplug.get_cpustats(vm, hostcpu)
                logging.debug("hostcpu:%s vcputime: %s emulatortime: "
                              "%s cputime: %s", hostcpu, cpustats[hostcpu][0],
                              cpustats[hostcpu][1], cpustats[hostcpu][2])
                if config_pin:
                    if cpustats[hostcpu][0] == 0:
                        fail = True
                        logging.error("vcputime should be positive as vcpu is pinned")
                    if cpustats[hostcpu][1] > 0:
                        fail = True
                        logging.error("Non zero emulatortime even with emulator unpinned")
        if condition:
            set_condition(vm_name, condition, reset=True, guestbt=condn_result)

        # Check for guest functional
        cpucount = vm.get_cpu_count()
        if cpucount != current_vcpu:
            test.fail("Incorrect final guest vcpu\nExpected:%s Actual:%s" %
                      (cpucount, current_vcpu))
    finally:
        if fail:
            test.fail("Consult previous errors")
        org_xml.sync()
Example #18
0
    def test_s390x(self):
        s390x = u"""vendor_id       : IBM/S390
# processors    : 2
bogomips per cpu: 2913.00
max thread id   : 0
features        : esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te sie
facilities      : 0 1 2 3 4 6 7 8 9 10 12 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 30 31 32 33 34 35 36 37 40 41 42 43 44 45 46 47 48 49 50 51 52 57 73 74 75 76 77
cache0          : level=1 type=Data scope=Private size=96K line_size=256 associativity=6
cache1          : level=1 type=Instruction scope=Private size=64K line_size=256 associativity=4
cache2          : level=2 type=Data scope=Private size=1024K line_size=256 associativity=8
cache3          : level=2 type=Instruction scope=Private size=1024K line_size=256 associativity=8
cache4          : level=3 type=Unified scope=Shared size=49152K line_size=256 associativity=12
cache5          : level=4 type=Unified scope=Shared size=393216K line_size=256 associativity=24
processor 0: version = FF,  identification = 32C047,  machine = 2827
processor 1: version = FF,  identification = 32C047,  machine = 2827

cpu number      : 0
cpu MHz dynamic : 5504
cpu MHz static  : 5504

cpu number      : 1
cpu MHz dynamic : 5504
cpu MHz static  : 5504

"""

        s390x_2 = u"""vendor_id       : IBM/S390
# processors    : 4
bogomips per cpu: 2913.00
max thread id   : 0
features        : esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te sie
facilities      : 0 1 2 3 4 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 30 31 32 33 34 35 36 37 40 41 42 43 44 45 46 47 48 49 50 51 52 57 64 65 66 67 68 69 70 71 72 73 75 76 77 78 131 132
cache0          : level=1 type=Data scope=Private size=96K line_size=256 associativity=6
cache1          : level=1 type=Instruction scope=Private size=64K line_size=256 associativity=4
cache2          : level=2 type=Data scope=Private size=1024K line_size=256 associativity=8
cache3          : level=2 type=Instruction scope=Private size=1024K line_size=256 associativity=8
cache4          : level=3 type=Unified scope=Shared size=49152K line_size=256 associativity=12
cache5          : level=4 type=Unified scope=Shared size=393216K line_size=256 associativity=24
processor 0: version = 00,  identification = 3FC047,  machine = 2827
processor 1: version = 00,  identification = 3FC047,  machine = 2827
processor 2: version = 00,  identification = 3FC047,  machine = 2827
processor 3: version = 00,  identification = 3FC047,  machine = 2827

cpu number      : 0
cpu MHz dynamic : 5504
cpu MHz static  : 5504

cpu number      : 1
cpu MHz dynamic : 5504
cpu MHz static  : 5504

cpu number      : 2
cpu MHz dynamic : 5504
cpu MHz static  : 5504

cpu number      : 3
cpu MHz dynamic : 5504
cpu MHz static  : 5504
"""

        with mock.patch('avocado.utils.cpu.platform.machine',
                        return_value='s390x'):
            with mock.patch('avocado.utils.cpu.open',
                            return_value=self._get_file_mock(s390x)):
                self.assertEqual(len(cpu.cpu_online_list()), 2)
            with mock.patch('avocado.utils.cpu.open',
                            return_value=self._get_file_mock(s390x_2)):
                self.assertEqual(len(cpu.cpu_online_list()), 4)
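The _get_file_mock helper referenced above is defined elsewhere in the test
class; a minimal stand-in (an assumption, not the original implementation)
would return a context-manager mock that yields the canned /proc/cpuinfo text
once avocado.utils.cpu.open is patched with it:

import io
from unittest import mock


def _get_file_mock(self, content):
    """
    Build a mock usable as 'with open(...) as f:', yielding content.
    """
    # MagicMock pre-configures __enter__/__exit__, so the result works
    # directly as a context manager
    file_mock = mock.MagicMock()
    file_mock.__enter__.return_value = io.StringIO(content)
    return file_mock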
Example #19
0
def run(test, params, env):
    """
    Test emulatorpin tuning

    1) Positive testing
       1.1) get the current emulatorpin parameters for a running/shutoff guest
       1.2) set the current emulatorpin parameters for a running/shutoff guest
    2) Negative testing
       2.1) get emulatorpin parameters for a running/shutoff guest
       2.2) set emulatorpin parameters for a running/shutoff guest
    """

    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cgconfig = params.get("cgconfig", "on")
    cpulist = params.get("emulatorpin_cpulist")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")

    # Backup original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    emulatorpin_placement = params.get("emulatorpin_placement", "")
    if emulatorpin_placement:
        vm.destroy()
        vmxml.placement = emulatorpin_placement
        vmxml.sync()
        try:
            vm.start()
        except VMStartError as detail:
            # Recover the VM and fail out early
            vmxml_backup.sync()
            logging.debug("Used VM XML:\n %s", vmxml)
            test.fail("VM Fails to start: %s" % detail)

    test_dicts = dict(params)
    test_dicts['vm'] = vm

    host_cpus = cpu.online_cpus_count()
    test_dicts['host_cpus'] = host_cpus
    cpu_max = int(host_cpus) - 1
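    # Note: deriving cpu_max as host_cpus - 1 assumes online cpu ids are
    # contiguous and start at 0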

    cpu_list = None

    # Assemble cpu list for positive test
    if status_error == "no":
        if cpulist is None:
            pass
        elif cpulist == "x":
            cpu_online_map = list(map(str, cpu.cpu_online_list()))
            cpulist = random.choice(cpu_online_map)
        elif cpulist == "x-y":
            # By default, the emulator is pinned to all cpus, and the element
            # 'cputune/emulatorpin' may not exist in the VM's XML. libvirt
            # does nothing when the emulator is pinned to that same set, so
            # the XML would stay unchanged. For testing, therefore, avoid
            # the value 0-$cpu_max.
            if cpu_max < 2:
                cpulist = "0-0"
            else:
                cpulist = "0-%s" % (cpu_max - 1)
        elif cpulist == "x,y":
            cpu_online_map = list(map(str, cpu.cpu_online_list()))
            cpulist = ','.join(random.sample(cpu_online_map, 2))
        elif cpulist == "x-y,^z":
            cpulist = "0-%s,^%s" % (cpu_max, cpu_max)
        elif cpulist == "-1":
            cpulist = "-1"
        elif cpulist == "out_of_max":
            cpulist = str(cpu_max + 1)
        else:
            test.cancel("CPU-list=%s is not recognized."
                        % cpulist)
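    # e.g. with 8 online host cpus (cpu_max == 7): "x-y" becomes "0-6",
    # "x-y,^z" becomes "0-7,^7" and "out_of_max" becomes "8"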
    test_dicts['emulatorpin_cpulist'] = cpulist
    if cpulist:
        cpu_list = cpus_parser(cpulist)
        test_dicts['cpu_list'] = cpu_list
        logging.debug("CPU list is %s", cpu_list)

    cg = utils_cgroup.CgconfigService()

    if cgconfig == "off":
        if cg.cgconfig_is_running():
            cg.cgconfig_stop()

    # positive and negative testing #########
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts, test)
            else:
                set_emulatorpin_parameter(test_dicts, test)

        if status_error == "yes":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts, test)
            else:
                set_emulatorpin_parameter(test_dicts, test)
    finally:
        # Recover cgconfig and libvirtd service
        if not cg.cgconfig_is_running():
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
        # Recover vm.
        vmxml_backup.sync()
Example #20
0
    def test_x86_64_cpu_online(self):
        x86_64 = u"""processor	: 0
vendor_id	: GenuineIntel
cpu family	: 6
model		: 60
model name	: Intel(R) Core(TM) i7-4710MQ CPU @ 2.50GHz
stepping	: 3
microcode	: 0x22
cpu MHz		: 2494.301
cache size	: 6144 KB
physical id	: 0
siblings	: 8
core id		: 0
cpu cores	: 4
apicid		: 0
initial apicid	: 0
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm cpuid_fault epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 4988.60
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 1
vendor_id	: GenuineIntel
cpu family	: 6
model		: 60
model name	: Intel(R) Core(TM) i7-4710MQ CPU @ 2.50GHz
stepping	: 3
microcode	: 0x22
cpu MHz		: 2494.301
cache size	: 6144 KB
physical id	: 0
siblings	: 8
core id		: 0
cpu cores	: 4
apicid		: 1
initial apicid	: 1
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm cpuid_fault epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 4988.60
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 2
vendor_id	: GenuineIntel
cpu family	: 6
model		: 60
model name	: Intel(R) Core(TM) i7-4710MQ CPU @ 2.50GHz
stepping	: 3
microcode	: 0x22
cpu MHz		: 2494.301
cache size	: 6144 KB
physical id	: 0
siblings	: 8
core id		: 1
cpu cores	: 4
apicid		: 2
initial apicid	: 2
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm cpuid_fault epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 4988.60
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 3
vendor_id	: GenuineIntel
cpu family	: 6
model		: 60
model name	: Intel(R) Core(TM) i7-4710MQ CPU @ 2.50GHz
stepping	: 3
microcode	: 0x22
cpu MHz		: 2494.301
cache size	: 6144 KB
physical id	: 0
siblings	: 8
core id		: 1
cpu cores	: 4
apicid		: 3
initial apicid	: 3
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm cpuid_fault epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 4988.60
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 4
vendor_id	: GenuineIntel
cpu family	: 6
model		: 60
model name	: Intel(R) Core(TM) i7-4710MQ CPU @ 2.50GHz
stepping	: 3
microcode	: 0x22
cpu MHz		: 2494.301
cache size	: 6144 KB
physical id	: 0
siblings	: 8
core id		: 2
cpu cores	: 4
apicid		: 4
initial apicid	: 4
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm cpuid_fault epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 4988.60
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 5
vendor_id	: GenuineIntel
cpu family	: 6
model		: 60
model name	: Intel(R) Core(TM) i7-4710MQ CPU @ 2.50GHz
stepping	: 3
microcode	: 0x22
cpu MHz		: 2494.301
cache size	: 6144 KB
physical id	: 0
siblings	: 8
core id		: 2
cpu cores	: 4
apicid		: 5
initial apicid	: 5
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm cpuid_fault epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 4988.60
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 6
vendor_id	: GenuineIntel
cpu family	: 6
model		: 60
model name	: Intel(R) Core(TM) i7-4710MQ CPU @ 2.50GHz
stepping	: 3
microcode	: 0x22
cpu MHz		: 2494.301
cache size	: 6144 KB
physical id	: 0
siblings	: 8
core id		: 3
cpu cores	: 4
apicid		: 6
initial apicid	: 6
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm cpuid_fault epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 4988.60
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 7
vendor_id	: GenuineIntel
cpu family	: 6
model		: 60
model name	: Intel(R) Core(TM) i7-4710MQ CPU @ 2.50GHz
stepping	: 3
microcode	: 0x22
cpu MHz		: 2494.301
cache size	: 6144 KB
physical id	: 0
siblings	: 8
core id		: 3
cpu cores	: 4
apicid		: 7
initial apicid	: 7
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm cpuid_fault epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 4988.60
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

"""
        with mock.patch('avocado.utils.cpu.platform.machine', return_value='x86_64'):
            with mock.patch('avocado.utils.cpu.open',
                            return_value=self._get_file_mock(x86_64)):
                self.assertEqual(len(cpu.cpu_online_list()), 8)