Example #1
    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)
        host_cpu_count = cpuutil.total_cpus_count()

        vmxml_live = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml_live)

        # if vcpu >= maxvcpu, the cputune should not exist in xml
        if int(vcpu) >= int(maxvcpu):
            try:
                if hasattr(vmxml_live, 'cputune'):
                    test.fail("cputune tag is set when vcpu >= maxvcpu")
            except xcepts.LibvirtXMLError:
                pass
        elif "config" in vcpupin_option:
            vcpu_affinity = cpu.affinity_from_vcpupin(vm, vcpu, vcpupin_option)
            affinity = cpu.cpus_string_to_affinity_list(
                str(affinity[vcpu]), host_cpu_count)
            logging.debug("vcpu_affinity {}".format(vcpu_affinity))
            logging.debug("affinity {}".format(affinity))
            if vcpu_affinity[int(vcpu)] != affinity:
                test.fail("vcpu affinity check fail")
        # compare the expected vcpu affinity with the one obtained from the running vm
        elif not cpu.check_affinity(vm, affinity):
            test.fail("vcpu affinity check fail")
Example #2
    def setUp(self):
        """
        Verify :
        1. It is Power system and platform is Power NV.
        2. Cpupower tool is installed.
        """

        if 'ppc' not in distro.detect().arch:
            self.cancel("Processor is not ppc64")
        if not os.path.exists('/sys/devices/system/cpu/cpu0/cpufreq'):
            self.cancel('CPUFREQ is supported only on Power NV')

        smm = SoftwareManager()
        detected_distro = distro.detect()
        self.threshold = int(self.params.get("threshold", default=300000))
        if 'Ubuntu' in detected_distro.name:
            deps = [
                'linux-tools-common',
                'linux-tools-%s' % platform.uname()[2]
            ]
        elif detected_distro.name == "SuSE":
            deps = ['cpupower']
        else:
            deps = ['kernel-tools']
        for package in deps:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel('%s is needed for the test to be run' % package)

        self.cpus = cpu.total_cpus_count()
        self.cpu_num = 0
        self.max_freq = 0
        self.quad_dict = {}
        self.max_freq_dict = {}
        self.quad_to_cpu_mapping()
Example #3
def check_affinity(vm, expect_vcpupin):
    """
    Check the affinity of vcpus in various libvirt API output

    :param vm: VM object
    :param expect_vcpupin: Expected affinity details

    :return: True if affinity matches from different virsh API outputs,
             False if not
    """
    host_cpu_count = utils.total_count() if hasattr(utils, 'total_count') else utils.total_cpus_count()
    affinity_xml = affinity_from_xml(vm)
    affinity_vcpupin = affinity_from_vcpupin(vm)
    affinity_vcpuinfo = affinity_from_vcpuinfo(vm)
    result = True

    for vcpu in list(expect_vcpupin.keys()):
        expect_affinity = cpus_string_to_affinity_list(str(expect_vcpupin[vcpu]), host_cpu_count)
        # Check for vcpuinfo affinity
        if affinity_vcpuinfo[int(vcpu)] != expect_affinity:
            logging.error("CPU affinity in virsh vcpuinfo output"
                          " is unexpected")
            result = False
        # Check for vcpupin affinity
        if affinity_vcpupin[int(vcpu)] != expect_affinity:
            logging.error("Virsh vcpupin output is unexpected")
            result = False
        # Check for affinity in Domain xml
        if affinity_xml:
            if affinity_xml[vcpu] != expect_affinity:
                logging.error("Affinity in domain XML is unexpected")
                result = False
    if result:
        logging.debug("Vcpupin info check pass")
    return result
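A brief usage sketch, assuming a running VM object and an avocado test instance are in scope; the expected mapping keys are vcpu numbers and the values are cpuset strings:

expect_vcpupin = {"0": "0-3"}    # vcpu 0 expected on host CPUs 0-3
if not check_affinity(vm, expect_vcpupin):
    test.fail("vcpu affinity mismatch across virsh outputs")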
Example #4
    def setUp(self):
        """
        Verify :
        1. It is Power system and platform is Power NV.
        2. Cpupower tool is installed.
        """

        if 'ppc' not in distro.detect().arch:
            self.cancel("Processor is not ppc64")
        if not os.path.exists('/sys/devices/system/cpu/cpu0/cpufreq'):
            self.cancel('CPUFREQ is supported only on Power NV')

        smm = SoftwareManager()
        detected_distro = distro.detect()
        self.threshold = int(self.params.get("threshold", default=300000))
        if 'Ubuntu' in detected_distro.name:
            deps = ['linux-tools-common', 'linux-tools-%s'
                    % platform.uname()[2]]
        elif detected_distro.name == "SuSE":
            deps = ['cpupower']
        else:
            deps = ['kernel-tools']
        for package in deps:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel('%s is needed for the test to be run' % package)

        self.cpus = cpu.total_cpus_count()
        self.cpu_num = 0
        self.max_freq = 0
        self.quad_dict = {}
        self.max_freq_dict = {}
        self.quad_to_cpu_mapping()
Example #5
    def setUp(self):
        """
        Verify :
        1. It is Power system and platform is Power NV.
        2. Cpupower tool is installed.
        """

        if 'ppc' not in distro.detect().arch:
            self.cancel("Processor is not ppc64")
        if not os.path.exists('/sys/devices/system/cpu/cpu0/cpufreq'):
            self.cancel('CPUFREQ is supported only on Power NV')

        smm = SoftwareManager()
        detected_distro = distro.detect()
        if 'Ubuntu' in detected_distro.name:
            deps = [
                'linux-tools-common',
                'linux-tools-%s' % platform.uname()[2]
            ]
        elif detected_distro.name == "SuSE":
            deps = ['cpupower']
        else:
            deps = ['kernel-tools']
        for package in deps:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel('%s is needed for the test to be run' % package)

        fre_min = 0
        fre_max = 0
        freq_info = process.system_output("cpupower frequency-info",
                                          shell=True).decode("utf-8")
        for line in str(freq_info).splitlines():
            if re.search('hardware limits:', line, re.IGNORECASE):
                frq = line.split(":")[1]
                frq_init = frq.split('-')[0]
                frq_last = frq.split('-')[1]
                fre_min = float(frq_init.split('GHz')[0])
                fre_max = float(frq_last.split('GHz')[0])
                break
        threshold = (fre_max - fre_min) * (10**6)
        self.threshold = int(self.params.get("threshold", default=threshold))
        self.cpus = cpu.total_cpus_count()
        self.cpu_num = 0
        self.max_freq = 0
        self.quad_dict = {}
        self.max_freq_dict = {}
        self.quad_to_cpu_mapping()
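The threshold above is derived from the "hardware limits" line of cpupower frequency-info output. A standalone sketch of that parsing and arithmetic on a sample line (values assumed):

line = "hardware limits: 2.30 GHz - 3.90 GHz"        # sample output line
frq = line.split(":")[1]                             # " 2.30 GHz - 3.90 GHz"
fre_min = float(frq.split('-')[0].split('GHz')[0])   # 2.3
fre_max = float(frq.split('-')[1].split('GHz')[0])   # 3.9
threshold = (fre_max - fre_min) * (10 ** 6)          # ~1600000.0 kHz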
Example #6
def affinity_from_vcpupin(vm):
    """
    Returns dict of vcpu's affinity from virsh vcpupin output

    :param vm: VM object

    :return: dict of affinity of VM
    """
    vcpupin_output = {}
    vcpupin_affinity = {}
    host_cpu_count = utils.total_cpus_count()
    for vcpu in virsh.vcpupin(vm.name).stdout.strip().split('\n')[2:]:
        vcpupin_output[int(vcpu.split(":")[0])] = vcpu.split(":")[1]
    for vcpu in vcpupin_output:
        vcpupin_affinity[vcpu] = libvirt.cpus_string_to_affinity_list(
            vcpupin_output[vcpu], host_cpu_count)
    return vcpupin_affinity
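The parser above skips virsh vcpupin's two header lines and splits each "VCPU: CPU Affinity" row on ':'. A standalone sketch with sample output (values assumed):

sample = """VCPU: CPU Affinity
----------------------------------
   0: 0-3
   1: 2,3"""
vcpupin_output = {}
for row in sample.split('\n')[2:]:
    vcpupin_output[int(row.split(":")[0])] = row.split(":")[1]
# vcpupin_output -> {0: ' 0-3', 1: ' 2,3'}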
Example #7
def affinity_from_vcpupin(vm):
    """
    Returns dict of vcpu's affinity from virsh vcpupin output

    :param vm: VM object

    :return: dict of affinity of VM
    """
    vcpupin_output = {}
    vcpupin_affinity = {}
    host_cpu_count = utils.total_cpus_count()
    result = virsh.vcpupin(vm.name)
    for vcpu in results_stdout_52lts(result).strip().split('\n')[2:]:
        vcpupin_output[int(vcpu.split(":")[0])] = vcpu.split(":")[1]
    for vcpu in vcpupin_output:
        vcpupin_affinity[vcpu] = libvirt.cpus_string_to_affinity_list(
            vcpupin_output[vcpu], host_cpu_count)
    return vcpupin_affinity
Example #8
def affinity_from_proc(vm):
    """
    Return dict of affinity from proc

    :param vm: VM object

    :return: dict of affinity of VM
    """
    pid = vm.get_pid()
    proc_affinity = {}
    vcpu_pids = []
    host_cpu_count = utils.total_count() if hasattr(utils, 'total_count') else utils.total_cpus_count()
    vcpu_pids = vm.get_vcpus_pid()
    for vcpu in range(len(vcpu_pids)):
        output = cpu_allowed_list_by_task(pid, vcpu_pids[vcpu])
        output_affinity = cpus_string_to_affinity_list(output, int(host_cpu_count))
        proc_affinity[vcpu] = output_affinity
    return proc_affinity
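A hedged comparison sketch using the helper above (vm, host_cpu_count and the expected cpuset are assumed to be in scope):

proc_map = affinity_from_proc(vm)
expected = cpus_string_to_affinity_list("0-3", host_cpu_count)
if proc_map[0] != expected:
    logging.error("proc affinity for vcpu 0 differs from the expected cpuset")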
Example #9
def affinity_from_vcpupin(vm, vcpu=None, options=None):
    """
    Returns dict of vcpu's affinity from virsh vcpupin output

    :param vm: VM object
    :param vcpu: virtual cpu to query
    :param options: --live, --current or --config
    :return: dict of affinity of VM
    """
    vcpupin_output = {}
    vcpupin_affinity = {}
    host_cpu_count = utils.total_count() if hasattr(utils, 'total_count') else utils.total_cpus_count()
    result = virsh.vcpupin(vm.name, vcpu=vcpu, options=options, debug=True)
    for vcpu in result.stdout_text.strip().split('\n')[2:]:
        # On newer versions of libvirt, there is no ':' in
        # the vcpupin output anymore
        vcpupin_output[int(vcpu.split()[0].rstrip(':'))] = vcpu.split()[1]
    for vcpu in vcpupin_output:
        vcpupin_affinity[vcpu] = cpus_string_to_affinity_list(vcpupin_output[vcpu], host_cpu_count)
    return vcpupin_affinity
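The rstrip(':') above lets the same parser accept both the old ("0: 0-3") and the newer, colon-less ("0   0-3") vcpupin row formats; a tiny illustration:

for row in ("   0: 0-3", "   0      0-3"):     # old vs. new libvirt output
    vcpu_id = int(row.split()[0].rstrip(':'))  # -> 0 in both cases
    cpuset = row.split()[1]                    # -> "0-3" in both cases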
Example #10
def affinity_from_proc(vm):
    """
    Return dict of affinity from proc

    :param vm: VM object

    :return: dict of affinity of VM
    """
    pid = vm.get_pid()
    proc_affinity = {}
    vcpu_pids = []
    host_cpu_count = utils.total_cpus_count()
    vcpu_pids = vm.get_vcpus_pid()
    for vcpu in range(len(vcpu_pids)):
        output = utils_test.libvirt.cpu_allowed_list_by_task(
            pid, vcpu_pids[vcpu])
        output_affinity = utils_test.libvirt.cpus_string_to_affinity_list(
            output,
            int(host_cpu_count))
        proc_affinity[vcpu] = output_affinity
    return proc_affinity
Example #11
def affinity_from_vcpupin(vm, vcpu=None, options=None):
    """
    Returns dict of vcpu's affinity from virsh vcpupin output

    :param vm: VM object
    :param vcpu: virtual cpu to query
    :param options: --live, --current or --config
    :return: dict of affinity of VM
    """
    vcpupin_output = {}
    vcpupin_affinity = {}
    host_cpu_count = utils.total_cpus_count()
    result = virsh.vcpupin(vm.name, vcpu=vcpu, options=options, debug=True)
    for vcpu in results_stdout_52lts(result).strip().split('\n')[2:]:
        # On newer versions of libvirt, there is no ':' in
        # the vcpupin output anymore
        vcpupin_output[int(vcpu.split()[0].rstrip(':'))] = vcpu.split()[1]
    for vcpu in vcpupin_output:
        vcpupin_affinity[vcpu] = libvirt.cpus_string_to_affinity_list(
            vcpupin_output[vcpu], host_cpu_count)
    return vcpupin_affinity
Example #12
    def _prepare_test_environment():
        """
        Prepare the test tools, such as hv_tlbflush & stress

        :return: a running HostStress object
        """

        copy_tlbflush_cmd = params["copy_tlbflush_cmd"]

        vm = env.get_vm(params["main_vm"])
        vm.verify_alive()
        session = vm.wait_for_login(timeout=timeout)

        logging.info("Copy tlbflush tool related files")
        for f in tlbflush_filenames:
            copy_file_cmd = utils_misc.set_winutils_letter(
                session, copy_tlbflush_cmd % f)
            session.cmd(copy_file_cmd)

        logging.info("Create a large file for test")
        create_test_file_cmd = params["create_test_file_cmd"]
        test_file_size = params["test_file_size"]
        test_file_size = utils_numeric.normalize_data_size(test_file_size,
                                                           order_magnitude="B")
        session.cmd(create_test_file_cmd % test_file_size)
        vm.graceful_shutdown(timeout=timeout)

        stress_type = params.get("stress_type", "stress")
        stress_pkg_name = params.get("stress_pkg_name", "stress-1.0.4.tar.gz")
        stress_root_dir = data_dir.get_deps_dir("stress")
        downloaded_file_path = os.path.join(stress_root_dir, stress_pkg_name)
        host_cpu_count = cpu.total_cpus_count()

        host_stress = utils_test.HostStress(
            stress_type,
            params,
            download_type="tarball",
            downloaded_file_path=downloaded_file_path,
            stress_args="--cpu %s > /dev/null 2>&1& " % host_cpu_count)
        return host_stress
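The environment setup above sizes the guest test file through utils_numeric.normalize_data_size, which converts a human-readable size to a byte count; a hedged illustration (exact return formatting assumed):

test_file_size = utils_numeric.normalize_data_size("1G", order_magnitude="B")
# roughly "1073741824", substituted into create_test_file_cmd % test_file_size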
Example #13
def affinity_from_xml(vm):
    """
    Returns dict of the vcpu's affinity from
    guest xml

    :param vm: VM object

    :return: dict of affinity of VM
    """
    host_cpu_count = utils.total_cpus_count()
    xml_affinity_list = []
    xml_affinity = {}
    try:
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm.name)
        xml_affinity_list = vmxml['cputune'].vcpupins
    except LibvirtXMLNotFoundError:
        logging.debug("No <cputune> element find in domain xml")
        return xml_affinity
    # Store xml_affinity_list to a dict
    for vcpu in xml_affinity_list:
        xml_affinity[vcpu['vcpu']] = libvirt.cpus_string_to_affinity_list(vcpu['cpuset'],
                                                                          host_cpu_count)
    return xml_affinity
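The vcpupins list above comes from <cputune><vcpupin vcpu='0' cpuset='0-3'/></cputune> elements in the domain XML; a brief usage sketch (8 host CPUs assumed):

xml_map = affinity_from_xml(vm)
# e.g. {'0': ['y', 'y', 'y', 'y', '-', '-', '-', '-']}
# note: the keys are XML attribute values, i.e. strings, not ints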
Example #14
def affinity_from_xml(vm):
    """
    Returns dict of the vcpu's affinity from
    guest xml

    :param vm: VM object

    :return: dict of affinity of VM
    """
    host_cpu_count = utils.total_cpus_count()
    xml_affinity_list = []
    xml_affinity = {}
    try:
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm.name)
        xml_affinity_list = vmxml['cputune'].vcpupins
    except LibvirtXMLNotFoundError:
        logging.debug("No <cputune> element find in domain xml")
        return xml_affinity
    # Store xml_affinity_list to a dict
    for vcpu in xml_affinity_list:
        xml_affinity[vcpu['vcpu']] = libvirt.cpus_string_to_affinity_list(
            vcpu['cpuset'], host_cpu_count)
    return xml_affinity
Example #15
def check_affinity(vm, expect_vcpupin):
    """
    Check the affinity of vcpus in various libvirt API output

    :param vm: VM object
    :param expect_vcpupin: Expected affinity details

    :return: True if affinity matches from different virsh API outputs,
             False if not
    """
    host_cpu_count = utils.total_cpus_count()
    affinity_xml = affinity_from_xml(vm)
    affinity_vcpupin = affinity_from_vcpupin(vm)
    affinity_vcpuinfo = affinity_from_vcpuinfo(vm)
    result = True

    for vcpu in list(expect_vcpupin.keys()):
        expect_affinity = libvirt.cpus_string_to_affinity_list(
            str(expect_vcpupin[vcpu]), host_cpu_count)
        # Check for vcpuinfo affinity
        if affinity_vcpuinfo[int(vcpu)] != expect_affinity:
            logging.error("CPU affinity in virsh vcpuinfo output"
                          " is unexpected")
            result = False
        # Check for vcpupin affinity
        if affinity_vcpupin[int(vcpu)] != expect_affinity:
            logging.error("Virsh vcpupin output is unexpected")
            result = False
        # Check for affinity in Domain xml
        if affinity_xml:
            if affinity_xml[vcpu] != expect_affinity:
                logging.error("Affinity in domain XML is unexpected")
                result = False
    if result:
        logging.debug("Vcpupin info check pass")
    return result
Example #16
def run(test, params, env):
    """
    Test virsh cpu-stats command.

    The command can display domain per-CPU and total statistics.
    1. Call virsh cpu-stats [domain]
    2. Call virsh cpu-stats [domain] with valid options
    3. Call virsh cpu-stats [domain] with invalid options
    """

    if not virsh.has_help_command('cpu-stats'):
        test.cancel("This version of libvirt does not support "
                    "the cpu-stats test")

    vm_name = params.get("main_vm", "vm1")
    vm_ref = params.get("cpu_stats_vm_ref")
    status_error = params.get("status_error", "no")
    options = params.get("cpu_stats_options")
    error_msg = params.get("error_msg", "")
    logging.debug("options are %s", options)

    if vm_ref == "name":
        vm_ref = vm_name

    # get host cpus num
    cpus = cpu.online_cpus_count()
    logging.debug("host online cpu num is %s", cpus)

    # get options and put into a dict
    get_total = re.search('total', options)
    get_start = re.search('start', options)
    get_count = re.search('count', options)

    # command without options
    get_noopt = 0
    if not get_total and not get_start and not get_count:
        get_noopt = 1

    # command with only --total option
    get_totalonly = 0
    if not get_start and not get_count and get_total:
        get_totalonly = 1

    option_dict = {}
    if options.strip():
        option_list = options.split('--')
        logging.debug("option_list is %s", option_list)
        for match in option_list[1:]:
            if get_start or get_count:
                option_dict[match.split(' ')[0]] = match.split(' ')[1]

    # check if there are enough cpus; if not, cancel the test
    if (status_error == "no"):
        cpu_start = int(option_dict.get("start", "0"))
        if cpu_start == 32:
            cpus = cpu.total_cpus_count()
            logging.debug("Host total cpu num: %s", cpus)
        if (cpu_start >= cpus):
            test.cancel("Host cpus are not enough")

    # Run virsh command
    cmd_result = virsh.cpu_stats(vm_ref, options,
                                 ignore_status=True, debug=True)
    output = cmd_result.stdout.strip()
    status = cmd_result.exit_status

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
        else:
            # Check error message is expected
            if not re.search(error_msg, cmd_result.stderr.strip()):
                test.fail("Error message is not expected!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
        else:
            # Get cgroup cpu_time
            if not get_totalonly:
                vm = env.get_vm(vm_ref)
                cgpath = utils_cgroup.resolve_task_cgroup_path(
                    vm.get_pid(), "cpuacct")
                # When a VM has an 'emulator' child cgroup present, we must
                # strip off that suffix when detecting the cgroup for a machine
                if os.path.basename(cgpath) == "emulator":
                    cgpath = os.path.dirname(cgpath)
                usage_file = os.path.join(cgpath, "cpuacct.usage_percpu")
                with open(usage_file, 'r') as f:
                    cgtime = f.read().strip().split()
                logging.debug("cgtime get is %s", cgtime)

            # Cut CPUs from output and format to list
            output = re.sub(r'\.', '', output)
            if get_total:
                mt_start = re.search('Total', output).start()
            else:
                mt_start = len(output)
            output_cpus = " ".join(output[:mt_start].split())
            cpus_list = re.compile(r'CPU\d+:').split(output_cpus)

            # conditions that list total time info
            if get_noopt or get_total:
                mt_end = re.search('Total', output).end()
                total_list = output[mt_end + 1:].split()

                total_time = int(total_list[1])
                user_time = int(total_list[4])
                system_time = int(total_list[7])

                # check Total cpu_time >= User + System cpu_time
                if user_time + system_time > total_time:
                    test.fail("total cpu_time < user_time + "
                              "system_time")
                logging.debug("Check total cpu_time %d >= user + system "
                              "cpu_time %d",
                              total_time, user_time + system_time)

            start_num = 0
            if get_start:
                start_num = int(option_dict["start"])

            end_num = int(cpus)
            if get_count:
                count_num = int(option_dict["count"])
                if end_num > start_num + count_num:
                    end_num = start_num + count_num

            # when only --total is given, only the "Total" cpu info is shown
            if get_totalonly:
                end_num = -1

            # find CPU[N] in output and sum the cpu_time and cgroup cpu_time
            sum_cputime = 0
            sum_cgtime = 0
            logging.debug("start_num %d, end_num %d", start_num, end_num)
            for i in range(start_num, end_num):
                if not re.search('CPU' + "%i" % i, output):
                    test.fail("Fail to find CPU" + "%i" % i + "in "
                              "result")
                logging.debug("Check CPU" + "%i" % i + " exist")
                sum_cputime += int(cpus_list[i - start_num + 1].split()[1])
                sum_cgtime += int(cgtime[i])

            # check cgroup cpu_time > sum of cpu_time
            if end_num >= 0:
                if sum_cputime > sum_cgtime:
                    test.fail("Check sum of cgroup cpu_time < sum "
                              "of output cpu_time")
                logging.debug("Check sum of cgroup cpu_time %d >= cpu_time %d",
                              sum_cgtime, sum_cputime)

            # check Total cpu_time >= sum of cpu_time when no options
            if get_noopt:
                if total_time < sum_cputime:
                    test.fail("total time < sum of output cpu_time")
                logging.debug("Check total time %d >= sum of output cpu_time"
                              " %d", total_time, sum_cputime)
Example #17
def run(test, params, env):
    """
    Test vcpu affinity feature as follows:
    positive test:
        1. use vcpu cpuset in xml to define vcpu affinity
        2. use cputune cpuset in xml to define vcpu affinity
        3. use offline-to-online host cpu as cpuset to run virsh vcpupin
        4. set vcpu placement in xml to auto and check xml result
        5. set vcpu cpuset in xml without placement defined and check xml result
    negative test:
        1. use out-of-range cpuset as vcpu cpuset in xml to define vcpu affinity
        2. use out-of-range cpuset as cputune cpuset in xml to define vcpu affinity
        3. use invalid cpuset as cputune cpuset in xml to define vcpu affinity
        4. use duplicate vcpu in xml to define vcpu affinity
        5. use offline host cpu as cputune cpuset to run virsh vcpupin
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cpuset_mask = params.get("cpuset_mask", "")
    vcpu = params.get("vcpu", "0")
    maxvcpu = params.get("maxvcpu", "8")
    current_vcpu = params.get("current_vcpu", "3")
    check = params.get("check", "")
    config_xml = params.get("config_xml", "")

    status_error = "yes" == params.get("status_error", "no")
    define_fail = "yes" == params.get("define_fail", "no")
    start_fail = "yes" == params.get("start_fail", "no")
    runtime_fail = "yes" == params.get("runtime_fail", "no")
    vm_down = "yes" == params.get("vm_down", "no")

    vcpu_cpuset = params.get("vcpu_cpuset", "")
    cputune_cpuset = params.get("cputune_cpuset", "")
    vcpu_placement = params.get("vcpu_placement", "static")
    err_msg = params.get("err_msg", "")
    start_timeout = int(params.get("start_timeout", "60"))
    offline_hostcpus = params.get("offline_hostcpus", "")
    machine_cpuset_path = params.get("machine_cpuset_path", "")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)
        # compare the expected vcpu affinity with the one obtained from the running vm
        if not utils_hotplug.check_affinity(vm, affinity):
            test.fail("vcpu affinity check fail")

    try:
        hostcpu_num = int(cpu.total_cpus_count())

        # online all host cpus
        for x in range(hostcpu_num):
            if cpu.online(x):
                test.fail("fail to online cpu{}".format(x))

        # use vcpu cpuset or/and cputune cpuset to define xml
        del vmxml.cputune
        del vmxml.vcpus
        del vmxml.placement
        vmxml.vcpu = int(maxvcpu)
        vmxml.current_vcpu = current_vcpu

        # config vcpu cpuset for cpuset range test
        num = 1 if not status_error else 0
        cpuset_new = "0-{},^{}".format(hostcpu_num - num, cpuset_mask)
        if (config_xml == "vcpu" and check.endswith("range_cpuset")):
            vcpu_cpuset = cpuset_new
        vmxml.cpuset = vcpu_cpuset

        if vcpu_placement:
            vmxml.placement = vcpu_placement

        if config_xml == "cputune":
            cputune = vm_xml.VMCPUTuneXML()
            if check.endswith("range_cpuset"):
                cputune_cpuset = cpuset_new
            if check.endswith("duplicate_vcpu"):
                cputune.vcpupins = [{
                    'vcpu': vcpu,
                    'cpuset': "2"
                }, {
                    'vcpu': vcpu,
                    'cpuset': "3"
                }]
            else:
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': cputune_cpuset}]
            vmxml.cputune = cputune

        logging.debug(vmxml)
        if status_error and define_fail:
            result_to_check = virsh.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

        # start the vm
        if status_error and start_fail:
            result_to_check = virsh.start(vm_name, debug=True)

        if (not status_error) or runtime_fail:
            result_to_check = virsh.start(vm_name,
                                          debug=True,
                                          ignore_status=False)
            vm.wait_for_login(timeout=start_timeout).close()

            # test vcpu cpuset in offline/online host cpu scenario
            if check.endswith("offline_hostcpu"):
                if vm_down:
                    vm.shutdown()
                for x in offline_hostcpus.split(','):
                    if cpu.offline(x):
                        test.fail("fail to offline cpu{}".format(x))
                    logging.debug("offline host cpu {}".format(x))
                if vm_down:
                    vm.start()
                    vm.wait_for_login(timeout=start_timeout).close()
                if not status_error:
                    # online host cpu
                    if cpu.online(cputune_cpuset):
                        test.fail(
                            "fail to online cpu{}".format(cputune_cpuset))

            # run virsh vcpupin to config vcpu affinity
            if check.startswith("cputune") and (not config_xml):
                result_to_check = virsh.vcpupin(vm_name,
                                                vcpu,
                                                cputune_cpuset,
                                                debug=True)

            if check == "vcpu_placement":
                check_vcpu_placement(test, params)
            elif not status_error:
                check_vcpu_affinity()

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()

        # recovery the host cpu env
        for x in range(hostcpu_num):
            cpu.online(x)
        cmd = "echo '0-{}' > {}".format(hostcpu_num - 1, machine_cpuset_path)
        process.run(cmd, shell=True)
Example #18
def run(test, params, env):
    """
    KVM multi test:
    1) Log into guests
    2) Check all the nics available or not
    3) Ping among guest nic and host
       3.1) Ping with different packet size
       3.2) Flood ping test
       3.3) Final ping test
    4) Transfer files among guest nics and host
       4.1) Create file by dd command in guest
       4.2) Transfer file between nics
       4.3) Compare original file and transferred file
    5) ping among different nics
       5.1) Ping with different packet size
       5.2) Flood ping test
       5.3) Final ping test
    6) Transfer files among different nics
       6.1) Create file by dd command in guest
       6.2) Transfer file between nics
       6.3) Compare original file and transferred file
    7) Repeat step 3 - 6 on every nic.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def ping(session, nic, dst_ip, strict_check, flood_minutes):
        d_packet_size = [
            1, 4, 48, 512, 1440, 1500, 1505, 4054, 4055, 4096, 4192, 8878,
            9000, 32767, 65507
        ]
        packet_size = params.get("packet_size", "").split() or d_packet_size
        for size in packet_size:
            error_context.context("Ping with packet size %s" % size,
                                  logging.info)
            status, output = utils_test.ping(dst_ip,
                                             10,
                                             interface=nic,
                                             packetsize=size,
                                             timeout=30,
                                             session=session)
            if strict_check == "yes":
                ratio = utils_test.get_loss_ratio(output)
                if ratio != 0:
                    test.fail("Loss ratio is %s for packet size"
                              " %s" % (ratio, size))
            else:
                if status != 0:
                    test.fail("Ping returns non-zero value %s" % output)

        error_context.context("Flood ping test", logging.info)
        utils_test.ping(dst_ip,
                        None,
                        interface=nic,
                        flood=True,
                        output_func=None,
                        timeout=flood_minutes * 60,
                        session=session)
        error_context.context("Final ping test", logging.info)
        counts = params.get("ping_counts", 100)
        status, output = utils_test.ping(dst_ip,
                                         counts,
                                         interface=nic,
                                         timeout=float(counts) * 1.5,
                                         session=session)
        if strick_check == "yes":
            ratio = utils_test.get_loss_ratio(output)
            if ratio != 0:
                test.fail("Packet loss ratio is %s after flood" % ratio)
        else:
            if status != 0:
                test.fail("Ping returns non-zero value %s" % output)

    def file_transfer(session, src, dst):
        username = params.get("username", "")
        password = params.get("password", "")
        src_path = "/tmp/1"
        dst_path = "/tmp/2"
        port = int(params["file_transfer_port"])

        cmd = "dd if=/dev/urandom of=%s bs=100M count=1" % src_path
        cmd = params.get("file_create_cmd", cmd)

        error_context.context("Create file by dd command, cmd: %s" % cmd,
                              logging.info)
        session.cmd(cmd)

        transfer_timeout = int(params.get("transfer_timeout"))
        log_filename = "scp-from-%s-to-%s.log" % (src, dst)
        error_context.context("Transfer file from %s to %s" % (src, dst),
                              logging.info)
        remote.scp_between_remotes(src,
                                   dst,
                                   port,
                                   password,
                                   password,
                                   username,
                                   username,
                                   src_path,
                                   dst_path,
                                   log_filename=log_filename,
                                   timeout=transfer_timeout)
        src_path = dst_path
        dst_path = "/tmp/3"
        log_filename = "scp-from-%s-to-%s.log" % (dst, src)
        error_context.context("Transfer file from %s to %s" % (dst, src),
                              logging.info)
        remote.scp_between_remotes(dst,
                                   src,
                                   port,
                                   password,
                                   password,
                                   username,
                                   username,
                                   src_path,
                                   dst_path,
                                   log_filename=log_filename,
                                   timeout=transfer_timeout)
        error_context.context("Compare original file and transferred file",
                              logging.info)

        cmd1 = "md5sum /tmp/1"
        cmd2 = "md5sum /tmp/3"
        md5sum1 = session.cmd(cmd1).split()[0]
        md5sum2 = session.cmd(cmd2).split()[0]
        if md5sum1 != md5sum2:
            test.error("File changed after transfer")

    nic_interface_list = []
    check_irqbalance_cmd = params.get("check_irqbalance_cmd",
                                      "systemctl status irqbalance")
    stop_irqbalance_cmd = params.get("stop_irqbalance_cmd",
                                     "systemctl stop irqbalance")
    start_irqbalance_cmd = params.get("start_irqbalance_cmd",
                                      "systemctl start irqbalance")
    status_irqbalance = params.get("status_irqbalance",
                                   "Active: active|running")
    vms = params["vms"].split()
    host_mem = utils_memory.memtotal() // (1024 * 1024)
    host_cpu_count = cpu.total_cpus_count()
    vhost_count = 0
    if params.get("vhost"):
        vhost_count = 1
    if host_cpu_count < (1 + vhost_count) * len(vms):
        test.error("The host don't have enough cpus to start guest"
                   "pcus: %d, minimum of vcpus and vhost: %d" %
                   (host_cpu_count, (1 + vhost_count) * len(vms)))
    params['mem'] = host_mem // len(vms) * 1024
    params['smp'] = host_cpu_count // len(vms) - vhost_count
    if params['smp'] % 2 != 0:
        params['vcpu_sockets'] = 1
    params["start_vm"] = "yes"
    for vm_name in vms:
        env_process.preprocess_vm(test, params, env, vm_name)
    timeout = float(params.get("login_timeout", 360))
    strict_check = params.get("strick_check", "no")
    host_ip = utils_net.get_ip_address_by_interface(params.get("netdst"))
    host_ip = params.get("srchost", host_ip)
    flood_minutes = float(params["flood_minutes"])
    error_context.context("Check irqbalance service status", logging.info)
    o = process.system_output(check_irqbalance_cmd,
                              ignore_status=True,
                              shell=True).decode()
    check_stop_irqbalance = False
    if re.findall(status_irqbalance, o):
        logging.debug("stop irqbalance")
        process.run(stop_irqbalance_cmd, shell=True)
        check_stop_irqbalance = True
        o = process.system_output(check_irqbalance_cmd,
                                  ignore_status=True,
                                  shell=True).decode()
        if re.findall(status_irqbalance, o):
            test.error("Can not stop irqbalance")
    thread_list = []
    nic_interface = []
    for vm_name in vms:
        guest_ifname = ""
        guest_ip = ""
        vm = env.get_vm(vm_name)
        session = vm.wait_for_login(timeout=timeout)
        thread_list.extend(vm.vcpu_threads)
        thread_list.extend(vm.vhost_threads)
        error_context.context("Check all the nics available or not",
                              logging.info)
        for index, nic in enumerate(vm.virtnet):
            guest_ifname = utils_net.get_linux_ifname(session, nic.mac)
            guest_ip = vm.get_address(index)
            if not (guest_ifname and guest_ip):
                err_log = "vms %s get ip or ifname failed." % vm_name
                err_log = "ifname: %s, ip: %s." % (guest_ifname, guest_ip)
                test.fail(err_log)
            nic_interface = [guest_ifname, guest_ip, session]
            nic_interface_list.append(nic_interface)
    error_context.context("Pin vcpus and vhosts to host cpus", logging.info)
    host_numa_nodes = utils_misc.NumaInfo()
    vthread_num = 0
    for numa_node_id in host_numa_nodes.nodes:
        numa_node = host_numa_nodes.nodes[numa_node_id]
        for _ in range(len(numa_node.cpus)):
            if vthread_num >= len(thread_list):
                break
            vcpu_tid = thread_list[vthread_num]
            logging.debug("pin vcpu/vhost thread(%s) to cpu(%s)" %
                          (vcpu_tid, numa_node.pin_cpu(vcpu_tid)))
            vthread_num += 1

    nic_interface_list_len = len(nic_interface_list)
    # ping and file transfer test
    for src_ip_index in range(nic_interface_list_len):
        error_context.context("Ping test from guest to host", logging.info)
        src_ip_info = nic_interface_list[src_ip_index]
        ping(src_ip_info[2], src_ip_info[0], host_ip, strict_check,
             flood_minutes)
        error_context.context("File transfer test between guest and host",
                              logging.info)
        file_transfer(src_ip_info[2], src_ip_info[1], host_ip)
        for dst_ip in nic_interface_list[src_ip_index:]:
            if src_ip_info[1] == dst_ip[1]:
                continue
            txt = "Ping test between %s and %s" % (src_ip_info[1], dst_ip[1])
            error_context.context(txt, logging.info)
            ping(src_ip_info[2], src_ip_info[0], dst_ip[1], strict_check,
                 flood_minutes)
            txt = "File transfer test between %s " % src_ip_info[1]
            txt += "and %s" % dst_ip[1]
            error_context.context(txt, logging.info)
            file_transfer(src_ip_info[2], src_ip_info[1], dst_ip[1])
    if check_stop_irqbalance:
        process.run(start_irqbalance_cmd, shell=True)
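The pinning loop above delegates to NumaInfo.pin_cpu to bind each vcpu/vhost thread to a host CPU on the selected NUMA node. Conceptually this is equivalent to the following sketch (host_cpu_id is a hypothetical stand-in; the real helper chooses the CPU itself):

# bind thread vcpu_tid to a single host CPU, as pin_cpu is assumed to do
process.run("taskset -pc %s %s" % (host_cpu_id, vcpu_tid), shell=True)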
Example #19
def run(test, params, env):
    """
    Test virsh cpu-stats command.

    The command can display domain per-CPU and total statistics.
    1. Call virsh cpu-stats [domain]
    2. Call virsh cpu-stats [domain] with valid options
    3. Call virsh cpu-stats [domain] with invalid options
    """
    def get_cpuacct_info(suffix):
        """
        Get the CPU accounting info within the vm

        :param suffix: str, suffix of the CPU accounting.(stat/usage/usage_percpu)
        :return: list, the list of CPU accounting info
        """
        if 'cg_obj' not in locals():
            return
        # On cgroup v2 use cpu.stat as a substitute
        if cg_obj.is_cgroup_v2_enabled():
            cg_path = cg_obj.get_cgroup_path("cpu")
            para = ('cpu.%s' % suffix)
        else:
            cg_path = cg_obj.get_cgroup_path("cpuacct")
            para = ('cpuacct.%s' % suffix)
        # We only need the info from the file whose path does not contain "emulator"
        if os.path.basename(cg_path) == "emulator":
            cg_path = os.path.dirname(cg_path)
        usage_file = os.path.join(cg_path, para)
        with open(usage_file, 'r') as f:
            cpuacct_info = f.read().strip().split()
        logging.debug("cpuacct info %s", cpuacct_info)
        return cpuacct_info

    def check_user_and_system_time(total_list):
        user_time = float(total_list[4])
        system_time = float(total_list[7])

        # Check libvirt user and system time between pre and next cgroup time
        # Unit conversion (Unit: second)
        # Default time unit is microseconds on cgroup v2 while 1/100 second on v1
        if cg_obj.is_cgroup_v2_enabled():
            pre_user_time = float(cpuacct_res_pre[3]) / 1000000
            pre_sys_time = float(cpuacct_res_pre[5]) / 1000000
            next_user_time = float(cpuacct_res_next[3]) / 1000000
            next_sys_time = float(cpuacct_res_next[5]) / 1000000
        else:
            pre_user_time = float(cpuacct_res_pre[1]) / 100
            pre_sys_time = float(cpuacct_res_pre[3]) / 100
            next_user_time = float(cpuacct_res_next[1]) / 100
            next_sys_time = float(cpuacct_res_next[3]) / 100

        # check user_time
        if next_user_time >= user_time >= pre_user_time:
            logging.debug("Got the expected user_time: %s", user_time)

        else:
            test.fail("Got unexpected user_time: %s, " % user_time +
                      "should between pre_user_time:%s " % pre_user_time +
                      "and next_user_time:%s" % next_user_time)

        # check system_time
        if next_sys_time >= system_time >= pre_sys_time:
            logging.debug("Got the expected system_time: %s", system_time)

        else:
            test.fail("Got unexpected system_time: %s, " % system_time +
                      "should between pre_sys_time:%s " % pre_sys_time +
                      "and next_sys_time:%s" % next_sys_time)

    if not virsh.has_help_command('cpu-stats'):
        test.cancel("This version of libvirt does not support "
                    "the cpu-stats test")

    vm_name = params.get("main_vm", "vm1")
    vm_ref = params.get("cpu_stats_vm_ref")
    status_error = params.get("status_error", "no")
    options = params.get("cpu_stats_options")
    error_msg = params.get("error_msg", "")
    logging.debug("options are %s", options)

    if vm_ref == "name":
        vm_ref = vm_name

    vm = env.get_vm(vm_ref)
    if vm and vm.get_pid():
        cg_obj = libvirt_cgroup.CgroupTest(vm.get_pid())
    # get host cpus num
    cpus = cpu.online_cpus_count()
    logging.debug("host online cpu num is %s", cpus)

    # get options and put into a dict
    get_total = re.search('total', options)
    get_start = re.search('start', options)
    get_count = re.search('count', options)

    # command without options
    get_noopt = 0
    if not get_total and not get_start and not get_count:
        get_noopt = 1

    # command with only --total option
    get_totalonly = 0
    if not get_start and not get_count and get_total:
        get_totalonly = 1

    option_dict = {}
    if options.strip():
        option_list = options.split('--')
        logging.debug("option_list is %s", option_list)
        for match in option_list[1:]:
            if get_start or get_count:
                option_dict[match.split(' ')[0]] = match.split(' ')[1]

    # check if there are enough cpus; if not, cancel the test
    if (status_error == "no"):
        cpu_start = int(option_dict.get("start", "0"))
        if cpu_start == 32:
            cpus = cpu.total_cpus_count()
            logging.debug("Host total cpu num: %s", cpus)
        if (cpu_start >= cpus):
            test.cancel("Host cpus are not enough")

    # get CPU accounting info twice to compare with user_time and system_time
    cpuacct_res_pre = get_cpuacct_info('stat')

    # Run virsh command
    cmd_result = virsh.cpu_stats(vm_ref,
                                 options,
                                 ignore_status=True,
                                 debug=True)
    output = cmd_result.stdout.strip()
    status = cmd_result.exit_status

    cpuacct_res_next = get_cpuacct_info('stat')

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command! Output: {}".format(
                output))
        else:
            # Check error message is expected
            if not re.search(error_msg, cmd_result.stderr.strip()):
                test.fail("Error message is not expected! "
                          "Expected: {} Actual: {}".format(
                              error_msg, cmd_result.stderr.strip()))
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command! Error: {}".format(
                cmd_result.stderr.strip()))
        else:
            # Get cgroup cpu_time
            if not get_totalonly:
                cgtime = get_cpuacct_info('usage_percpu')

            # Cut CPUs from output and format to list
            if get_total:
                mt_start = re.search('Total', output).start()
            else:
                mt_start = len(output)
            output_cpus = " ".join(output[:mt_start].split())
            cpus_list = re.compile(r'CPU\d+:').split(output_cpus)

            # conditions that list total time info
            if get_noopt or get_total:
                mt_end = re.search('Total', output).end()
                total_list = output[mt_end + 1:].split()
                total_time = float(total_list[1])
                check_user_and_system_time(total_list)

            start_num = 0
            if get_start:
                start_num = int(option_dict["start"])

            end_num = int(cpus)
            if get_count:
                count_num = int(option_dict["count"])
                if end_num > start_num + count_num:
                    end_num = start_num + count_num

            # when only --total is given, only the "Total" cpu info is shown
            if get_totalonly:
                end_num = -1

            # find CPU[N] in output and sum the cpu_time and cgroup cpu_time
            sum_cputime = 0
            sum_cgtime = 0
            logging.debug("start_num %d, end_num %d", start_num, end_num)
            for i in range(start_num, end_num):
                logging.debug("Check CPU" + "%i" % i + " exist")
                sum_cputime += float(cpus_list[i - start_num + 1].split()[1])
                sum_cgtime += float(cgtime[i])
                if not re.search('CPU' + "%i" % i, output):
                    test.fail("Fail to find CPU" + "%i" % i + "in " "result")

            # check cgroup cpu_time > sum of cpu_time
            if end_num >= 0:
                logging.debug("Check sum of cgroup cpu_time %d >= cpu_time %d",
                              sum_cgtime, sum_cputime)
                if sum_cputime > sum_cgtime:
                    test.fail("Check sum of cgroup cpu_time < sum "
                              "of output cpu_time")

            # check Total cpu_time >= sum of cpu_time when no options
            if get_noopt:
                logging.debug(
                    "Check total time %d >= sum of output cpu_time"
                    " %d", total_time, sum_cputime)
                if total_time < sum_cputime:
                    test.fail("total time < sum of output cpu_time")
Example #20
def run(test, params, env):
    """
    Test vcpu affinity feature as follows:
    positive test:
        1. use vcpu cpuset in xml to define vcpu affinity
        2. use cputune cpuset in xml to define vcpu affinity
        3. use offline-to-online host cpu as cpuset to run virsh vcpupin
        4. set vcpu placement in xml to auto and check xml result
        5. set vcpu cpuset in xml without placement defined and check xml result
        6. specify vcpu affinity for inactive vcpu
    negative test:
        1. use out-of-range cpuset as vcpu cpuset in xml to define vcpu affinity
        2. use out-of-range cpuset as cputune cpuset in xml to define vcpu affinity
        3. use invalid cpuset as cputune cpuset in xml to define vcpu affinity
        4. use duplicate vcpu in xml to define vcpu affinity
        5. use offline host cpu as cputune cpuset to run virsh vcpupin
        6. set vcpu affinity for a nonexistent vcpu and check xml result
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cpuset_mask = params.get("cpuset_mask", "")
    vcpu = params.get("vcpu", "0")
    setvcpus_option = params.get("setvcpus_option", "")
    setvcpus_count = params.get("setvcpus_count", "0")
    vcpupin_option = params.get("vcpupin_option", "")
    maxvcpu = params.get("maxvcpu", "8")
    current_vcpu = params.get("current_vcpu", "3")
    check = params.get("check", "")
    config_xml = params.get("config_xml", "")

    status_error = "yes" == params.get("status_error", "no")
    define_fail = "yes" == params.get("define_fail", "no")
    start_fail = "yes" == params.get("start_fail", "no")
    runtime_fail = "yes" == params.get("runtime_fail", "no")
    hotplug_vcpu = "yes" == params.get("hotplug_vcpu", "no")

    vcpu_cpuset = params.get("vcpu_cpuset", "")
    cputune_cpuset = params.get("cputune_cpuset", "")
    vcpu_placement = params.get("vcpu_placement", "static")
    err_msg = params.get("err_msg", "")
    start_timeout = int(params.get("start_timeout", "180"))
    offline_hostcpus = params.get("offline_hostcpus", "")
    machine_cpuset_path = params.get("machine_cpuset_path", "")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)
        host_cpu_count = cpuutil.total_cpus_count()

        vmxml_live = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml_live)

        # if vcpu >= maxvcpu, the cputune should not exist in xml
        if int(vcpu) >= int(maxvcpu):
            try:
                if hasattr(vmxml_live, 'cputune'):
                    test.fail("cputune tag is set when vcpu >= maxvcpu")
            except xcepts.LibvirtXMLError:
                pass
        elif "config" in vcpupin_option:
            vcpu_affinity = cpu.affinity_from_vcpupin(vm, vcpu, vcpupin_option)
            affinity = cpu.cpus_string_to_affinity_list(
                str(affinity[vcpu]), host_cpu_count)
            logging.debug("vcpu_affinity {}".format(vcpu_affinity))
            logging.debug("affinity {}".format(affinity))
            if vcpu_affinity[int(vcpu)] != affinity:
                test.fail("vcpu affinity check fail")
        # compare the expected vcpu affinity with the one obtained from the running vm
        elif not cpu.check_affinity(vm, affinity):
            test.fail("vcpu affinity check fail")

    try:
        hostcpu_num = int(cpuutil.total_cpus_count())
        if hostcpu_num < 8:
            test.cancel("The host should have at least 8 CPUs for this test.")

        # online all host cpus
        for x in range(1, hostcpu_num):
            if cpuutil.online(x):
                test.fail("fail to online cpu{}".format(x))

        # use vcpu cpuset or/and cputune cpuset to define xml
        del vmxml.cputune
        del vmxml.vcpus
        del vmxml.placement
        vmxml.vcpu = int(maxvcpu)
        vmxml.current_vcpu = current_vcpu

        # Remove cpu topology to avoid that it doesn't match vcpu count
        if vmxml.get_cpu_topology():
            new_cpu = vmxml.cpu
            del new_cpu.topology
            vmxml.cpu = new_cpu

        # config vcpu cpuset for cpuset range test
        num = 1 if not status_error else 0
        cpuset_new = "0-{},^{}".format(hostcpu_num-num, cpuset_mask)
        if (config_xml == "vcpu" and check.endswith("range_cpuset")):
            vcpu_cpuset = cpuset_new
        vmxml.cpuset = vcpu_cpuset

        if vcpu_placement:
            vmxml.placement = vcpu_placement

            # Remove numatune node since it will be automatically set
            # under 'auto' state
            if vcpu_placement == 'auto':
                vmxml.xmltreefile.remove_by_xpath('/numatune', remove_all=True)
                vmxml.xmltreefile.write()

        if config_xml == "cputune":
            cputune = vm_xml.VMCPUTuneXML()
            if check.endswith("range_cpuset"):
                cputune_cpuset = cpuset_new
            if check.endswith("duplicate_vcpu"):
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': "2"}, {'vcpu': vcpu, 'cpuset': "3"}]
            else:
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': cputune_cpuset}]
            vmxml.cputune = cputune

        logging.debug(vmxml)
        if status_error and define_fail:
            result_to_check = virsh.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

        # test vcpu cpuset in offline/online host cpu scenario
        if check.endswith("offline_hostcpu"):
            for x in offline_hostcpus.split(','):
                if cpuutil.offline(x):
                    test.fail("fail to offline cpu{}".format(x))
                logging.debug("offline host cpu {}".format(x))

        # start the vm
        if status_error and start_fail:
            result_to_check = virsh.start(vm_name, debug=True)

        if (not status_error) or runtime_fail:
            vm.start()
            vm.wait_for_login(timeout=start_timeout).close()

            # test vcpu cpuset in offline/online host cpu scenario
            if check.endswith("offline_hostcpu") and not status_error:
                # online host cpu
                if cpuutil.online(cputune_cpuset):
                    test.fail("fail to online cpu{}".format(cputune_cpuset))

            # run virsh vcpupin to config vcpu affinity
            if check.startswith("cputune") and (not config_xml):
                result_to_check = virsh.vcpupin(vm_name, vcpu, cputune_cpuset, vcpupin_option, debug=True)

            # hotplug vcpu test scenario
            if hotplug_vcpu:
                virsh.setvcpus(vm_name, setvcpus_count, setvcpus_option, debug=True, ignore_status=False)

            libvirtd_restart = False
            while True:
                if check == "vcpu_placement":
                    check_vcpu_placement(test, params)
                elif not status_error:
                    check_vcpu_affinity()
                if libvirtd_restart:
                    break
                # restart libvirtd and check vcpu affinity again
                utils_libvirtd.Libvirtd().restart()
                libvirtd_restart = True

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()

        # recovery the host cpu env
        for x in range(1, hostcpu_num):
            cpuutil.online(x)
        cmd = "echo '0-{}' > {}".format(hostcpu_num-1, machine_cpuset_path)
        process.run(cmd, shell=True)
Example #21
def run(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    """

    def affinity_from_vcpuinfo(vm_name, vcpu):
        """
        This function returns list of the vcpu's affinity from
        virsh vcpuinfo output

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        """

        output = virsh.vcpuinfo(vm_name).stdout.rstrip()
        affinity = re.findall('CPU Affinity: +[-y]+', output)
        total_affinity = affinity[int(vcpu)].split()[-1].strip()
        actual_affinity = list(total_affinity)
        return actual_affinity

    def affinity_from_vcpupin(vm_name, vcpu):
        """
        This function returns list of vcpu's affinity from vcpupin output

        :param vm_name: VM Name
        :param vcpu: vcpu number for which the affinity is required
        :return: list of affinity for the given vcpu
        """
        total_cpu = process.run("ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l", shell=True).stdout.strip()
        vcpus_affinity = {}
        output = virsh.vcpupin(vm_name).stdout
        for item in output.split('\n')[2:-2]:
            vcpus_affinity[item.split(':')[0].strip()] = item.split(':')[1].strip()
        return utils_test.libvirt.cpus_string_to_affinity_list(
            vcpus_affinity[str(vcpu)], int(total_cpu))

    def check_vcpupin(vm_name, vcpu, cpu_list, pid, vcpu_pid):
        """
        This function checks the actual affinity of a given vcpu against the
        expected one and raises an error if they do not match

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :param cpu_list: cpu list expected for the affinity
        :param pid: VM pid
        :param vcpu_pid: vcpu thread pid
        """

        total_cpu = process.run("ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l", shell=True).stdout.strip()
        logging.debug("Debug: cpulist %s", cpu_list)
        expected_output = utils_test.libvirt.cpus_string_to_affinity_list(
            cpu_list,
            int(total_cpu))
        logging.debug("Expected affinity: %s", expected_output)

        # Check for affinity value from vcpuinfo output
        actual_output = affinity_from_vcpuinfo(vm_name, vcpu)
        logging.debug("Actual affinity in vcpuinfo output: %s", actual_output)
        if expected_output == actual_output:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            test.fail("Cpu pinning details not updated properly in"
                      " virsh vcpuinfo command output")

        # Check for affinity value from vcpupin output
        actual_output_vcpupin = affinity_from_vcpupin(vm_name, vcpu)
        logging.debug("Actual affinity in vcpupin output: %s", actual_output_vcpupin)
        if expected_output == actual_output_vcpupin:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            test.fail("Cpu pinning details not updated properly in"
                      " virsh vcpupin command output")

        if pid is None:
            return
        # Get the actual cpu affinity value in the proc entry
        output = utils_test.libvirt.cpu_allowed_list_by_task(pid, vcpu_pid)
        actual_output_proc = utils_test.libvirt.cpus_string_to_affinity_list(
            output,
            int(total_cpu))
        logging.debug("Actual affinity in guest proc: %s", actual_output_proc)
        if expected_output == actual_output_proc:
            logging.info("successfully pinned vcpu: %s --> cpu: %s"
                         " in respective proc entry", vcpu, cpu_list)
        else:
            test.fail("Cpu pinning details are not "
                      "updated properly in /proc/"
                      "%s/task/%s/status" % (pid, vcpu_pid))

    def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
        """
        Run the vcpupin command and then check the result.
        """
        if vm_ref == "name":
            vm_ref = vm.name
        elif vm_ref == "uuid":
            vm_ref = vm.get_uuid()
        # Execute virsh vcpupin command.
        cmdResult = virsh.vcpupin(vm_ref, vcpu, cpu_list, options, debug=True)
        if cmdResult.exit_status:
            if not status_error:
                # Command fail and it is positive case.
                test.fail(cmdResult)
            else:
                # Command fail and it is negative case.
                return
        else:
            if status_error:
                # Command success and it is negative case.
                test.fail(cmdResult)
            else:
                # Command success and it is positive case.
                # "--config" will take effect after VM destroyed.
                pid = None
                vcpu_pid = None
                if options == "--config":
                    virsh.destroy(vm.name)
                else:
                    pid = vm.get_pid()
                    logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
                    vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    def offline_pin_and_check(vm, vcpu, cpu_list):
        """
        Edit domain xml to pin vcpu and check the result.
        """
        cputune = vm_xml.VMCPUTuneXML()
        cputune.vcpupins = [{'vcpu': str(vcpu), 'cpuset': cpu_list}]
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        vmxml.cputune = cputune
        vmxml.sync()
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)
        pid = vm.get_pid()
        vcpu_pid = vm.get_vcpus_pid()[vcpu]
        check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    if not virsh.has_help_command('vcpucount'):
        test.cancel("This version of libvirt doesn't"
                    " support this test")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    # Get the variables for vcpupin command.
    vm_ref = params.get("vcpupin_vm_ref", "name")
    options = params.get("vcpupin_options", "--current")
    cpu_list = params.get("vcpupin_cpu_list", "x")
    start_vm = ("yes" == params.get("start_vm", "yes"))
    vcpupin_initial = ("yes" == params.get("vcpupin_initial", "no"))

    # Get status of this case.
    status_error = ("yes" == params.get("status_error", "no"))

    # Edit domain xml to pin vcpus
    offline_pin = ("yes" == params.get("offline_pin", "no"))

    # Backup for recovery.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    if start_vm and vm.state() == "shut off":
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)

    # Get the guest vcpu count
    if offline_pin:
        vcpucount_option = "--config --active"
    else:
        vcpucount_option = "--live --active"
    guest_vcpu_count = virsh.vcpucount(vm_name,
                                       vcpucount_option).stdout.strip()

    # Find the alive cpus list
    # Materialize the map into a list: it is iterated several times below,
    # and a Python 3 map object would be exhausted after the first pass.
    cpus_list = list(map(str, cpuutils.cpu_online_list()))
    logging.info("Active cpus in host are %s", cpus_list)

    try:
        # Control multi domain vcpu affinity
        multi_dom = ("yes" == params.get("multi_dom_pin", "no"))
        vm2 = None
        # Before doing any vcpupin actions, lets check whether
        # initial pinning state is fine
        if vcpupin_initial:
            pid = vm.get_pid()
            logging.debug("vcpus_pid: %s vcpu count: %s", vm.get_vcpus_pid(), guest_vcpu_count)
            for vcpu in range(int(guest_vcpu_count)):
                vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, str(','.join(cpus_list)), pid, vcpu_pid)
            return

        if multi_dom:
            vm_names = params.get("vms").split()
            if len(vm_names) > 1:
                vm2 = env.get_vm(vm_names[1])
            else:
                test.error("Need more than one domains")
            if not vm2:
                test.cancel("No %s find" % vm_names[1])
            vm2.destroy()
            vm2xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm2.name)
            vm2xml_backup = vm2xml.copy()
            # Make sure vm2 has the same cpu numbers with vm
            vm2xml.set_vm_vcpus(vm2.name, int(guest_vcpu_count), guest_vcpu_count)
            if start_vm:
                vm2.start()

        # Run cases when guest is shutoff.
        if not offline_pin:
            if vm.is_dead() and not start_vm:
                run_and_check_vcpupin(vm, vm_ref, 0, 0, "")
                return
        # Get the host cpu count
        host_online_cpu_count = len(cpus_list)
        online_cpu_max = max(map(int, cpus_list))
        host_cpu_count = cpuutils.total_cpus_count()
        cpu_max = int(host_cpu_count) - 1
        if (host_online_cpu_count < 2) and (cpu_list != "x"):
            test.cancel("We need more cpus on host in this "
                        "case for the cpu_list=%s. But "
                        "current number of cpu on host is %s."
                        % (cpu_list, host_online_cpu_count))

        # Run test case
        for vcpu in range(int(guest_vcpu_count)):
            if cpu_list == "x":
                for cpu in cpus_list:
                    left_cpus = "0-%s,^%s" % (online_cpu_max, cpu)
                    if offline_pin:
                        offline_pin_and_check(vm, vcpu, str(cpu))
                        if multi_dom:
                            offline_pin_and_check(vm2, vcpu, left_cpus)
                    else:
                        run_and_check_vcpupin(vm, vm_ref, vcpu, str(cpu),
                                              options)
                        if multi_dom:
                            run_and_check_vcpupin(vm2, "name", vcpu, left_cpus,
                                                  options)
            else:
                if cpu_list == "x-y":
                    cpus = "0-%s" % online_cpu_max
                elif cpu_list == "x,y":
                    cpus = ','.join(random.sample(cpus_list, 2))
                    logging.info(cpus)
                elif cpu_list == "x-y,^z":
                    cpus = "0-%s,^%s" % (online_cpu_max, online_cpu_max)
                elif cpu_list == "r":
                    cpus = "r"
                elif cpu_list == "-1":
                    cpus = "-1"
                elif cpu_list == "out_of_max":
                    cpus = str(cpu_max + 1)
                else:
                    test.cancel("Cpu_list=%s is not recognized."
                                % cpu_list)
                if offline_pin:
                    offline_pin_and_check(vm, vcpu, cpus)
                else:
                    run_and_check_vcpupin(vm, vm_ref, vcpu, cpus, options)
    finally:
        # Recover xml of vm.
        vmxml_backup.sync()
        if vm2:
            vm2xml_backup.sync()
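check_vcpupin above compares three views of the same pinning (vcpuinfo, vcpupin, /proc) after running each through utils_test.libvirt.cpus_string_to_affinity_list. The following is a simplified re-implementation of that conversion for illustration only; it is not the library's code, and the exact semantics (e.g. ordering of '^' exclusions) are assumptions based on libvirt's documented cpulist syntax.

def cpus_string_to_affinity_list(cpus_string, num_cpus):
    """Expand a libvirt cpulist (e.g. '0-3,^2', 'r', '2') into a
    per-CPU affinity list of 'y'/'-' flags, index == host CPU id.
    """
    if cpus_string.strip() in ('r', '-1'):
        # 'r' and '-1' both mean "all host CPUs"
        return ['y'] * num_cpus
    affinity = ['-'] * num_cpus
    for chunk in cpus_string.split(','):
        chunk = chunk.strip()
        negate = chunk.startswith('^')
        if negate:
            chunk = chunk[1:]
        if '-' in chunk:
            start, end = map(int, chunk.split('-'))
            ids = range(start, end + 1)
        else:
            ids = [int(chunk)]
        for i in ids:
            affinity[i] = '-' if negate else 'y'
    return affinity

# e.g. cpus_string_to_affinity_list('0-3,^2', 6) -> ['y','y','-','y','-','-']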
Exemplo n.º 22
def run(test, params, env):
    """
    Time drift test (mainly for Windows guests):

    1) Log into a guest.
    2) Take a time reading from the guest and host.
    3) Run load on the guest and host.
    4) Take a second time reading.
    5) Stop the load and rest for a while.
    6) Take a third time reading.
    7) If the drift immediately after load is higher than a user-specified
       value (in %), fail. If the drift after the rest period is higher
       than a user-specified value, fail.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """

    # Helper functions
    def set_cpu_affinity(pid, mask):
        """
        Set the CPU affinity of all threads of the process with PID pid.
        Do this recursively for all child processes as well.

        :param pid: The process ID.
        :param mask: The CPU affinity mask.
        :return: A dict containing the previous mask for each thread.
        """
        tids = decode_to_text(
            process.system_output("ps -L --pid=%s -o lwp=" % pid,
                                  verbose=False,
                                  ignore_status=True)).split()
        prev_masks = {}
        for tid in tids:
            prev_mask = decode_to_text(
                process.system_output("taskset -p %s" % tid,
                                      verbose=False)).split()[-1]
            prev_masks[tid] = prev_mask
            process.system("taskset -p %s %s" % (mask, tid), verbose=False)
        children = decode_to_text(
            process.system_output("ps --ppid=%s -o pid=" % pid,
                                  verbose=False,
                                  ignore_status=True)).split()
        for child in children:
            prev_masks.update(set_cpu_affinity(child, mask))
        return prev_masks

    def restore_cpu_affinity(prev_masks):
        """
        Restore the CPU affinity of several threads.

        :param prev_masks: A dict containing TIDs as keys and masks as values.
        """
        for tid, mask in prev_masks.items():
            process.system("taskset -p %s %s" % (mask, tid),
                           verbose=False,
                           ignore_status=True)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    boot_option_added = params.get("boot_option_added")
    boot_option_removed = params.get("boot_option_removed")
    if boot_option_added or boot_option_removed:
        utils_test.update_boot_option(vm,
                                      args_removed=boot_option_removed,
                                      args_added=boot_option_added)

    if params["os_type"] == "windows":
        utils_time.sync_timezone_win(vm)

    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_serial_login(timeout=timeout)

    # Collect test parameters:
    # Command to run to get the current time
    time_command = params["time_command"]
    # Filter which should match a string to be passed to time.strptime()
    time_filter_re = params["time_filter_re"]
    # Time format for time.strptime()
    time_format = params["time_format"]
    guest_load_command = params["guest_load_command"]
    guest_load_stop_command = params["guest_load_stop_command"]
    host_load_command = params["host_load_command"]
    guest_load_instances = params["guest_load_instances"]
    host_load_instances = params["host_load_instances"]
    if not guest_load_instances and not host_load_instances:
        host_load_instances = cpu.total_cpus_count()
        guest_load_instances = vm.get_cpu_count()
    else:
        host_load_instances = int(host_load_instances)
        guest_load_instances = int(guest_load_instances)
    # CPU affinity mask for taskset
    cpu_mask = int(params.get("cpu_mask", "0xFF"), 16)
    load_duration = float(params.get("load_duration", "30"))
    rest_duration = float(params.get("rest_duration", "10"))
    drift_threshold = float(params.get("drift_threshold", "200"))
    drift_threshold_after_rest = float(
        params.get("drift_threshold_after_rest", "200"))
    test_duration = float(params.get("test_duration", "60"))
    interval_gettime = float(params.get("interval_gettime", "20"))
    guest_load_sessions = []
    host_load_sessions = []

    try:
        # Set the VM's CPU affinity
        prev_affinity = set_cpu_affinity(vm.get_shell_pid(), cpu_mask)

        try:
            # Open shell sessions with the guest
            logging.info("Starting load on guest...")
            for i in range(guest_load_instances):
                load_session = vm.wait_for_login(timeout=timeout)
                # Set output func to None to stop it from being called so we
                # can change the callback function and the parameters it takes
                # with no problems
                load_session.set_output_func(None)
                load_session.set_output_params(())
                load_session.set_output_prefix("(guest load %d) " % i)
                load_session.set_output_func(logging.debug)
                guest_load_sessions.append(load_session)

            # Get time before load
            # (ht stands for host time, gt stands for guest time)
            (ht0, gt0) = utils_test.get_time(session, time_command,
                                             time_filter_re, time_format)

            # Run some load on the guest
            if params["os_type"] == "linux":
                for i, load_session in enumerate(guest_load_sessions):
                    load_session.sendline(guest_load_command % i)
            else:
                for load_session in guest_load_sessions:
                    load_session.sendline(guest_load_command)

            # Run some load on the host
            logging.info("Starting load on host...")
            for i in range(host_load_instances):
                load_cmd = aexpect.run_bg(host_load_command,
                                          output_func=logging.debug,
                                          output_prefix="(host load %d) " % i,
                                          timeout=0.5)
                host_load_sessions.append(load_cmd)
                # Set the CPU affinity of the load process
                pid = load_cmd.get_pid()
                set_cpu_affinity(pid, cpu_mask << i)

            # Sleep for a while (during load)
            logging.info("Sleeping for %s seconds...", load_duration)
            time.sleep(load_duration)

            start_time = time.time()
            while (time.time() - start_time) < test_duration:
                # Get time delta after load
                (ht1, gt1) = utils_test.get_time(session, time_command,
                                                 time_filter_re, time_format)

                # Report results
                host_delta = ht1 - ht0
                guest_delta = gt1 - gt0
                drift = 100.0 * (host_delta - guest_delta) / host_delta
                logging.info("Host duration: %.2f", host_delta)
                logging.info("Guest duration: %.2f", guest_delta)
                logging.info("Drift: %.2f%%", drift)
                time.sleep(interval_gettime)

        finally:
            logging.info("Cleaning up...")
            # Restore the VM's CPU affinity
            restore_cpu_affinity(prev_affinity)
            # Stop the guest load
            if guest_load_stop_command:
                session.cmd_output(guest_load_stop_command)
            # Close all load shell sessions
            for load_session in guest_load_sessions:
                load_session.close()
            for load_session in host_load_sessions:
                load_session.close()

        # Sleep again (rest)
        logging.info("Sleeping for %s seconds...", rest_duration)
        time.sleep(rest_duration)

        # Get time after rest
        (ht2, gt2) = utils_test.get_time(session, time_command, time_filter_re,
                                         time_format)

    finally:
        session.close()
        # Remove boot flags added for this test.
        if boot_option_added or boot_option_removed:
            utils_test.update_boot_option(vm,
                                          args_removed=boot_option_added,
                                          args_added=boot_option_removed)

    # Report results
    host_delta_total = ht2 - ht0
    guest_delta_total = gt2 - gt0
    drift_total = 100.0 * (host_delta_total - guest_delta_total) / host_delta_total
    logging.info("Total host duration including rest: %.2f", host_delta_total)
    logging.info("Total guest duration including rest: %.2f",
                 guest_delta_total)
    logging.info("Total drift after rest: %.2f%%", drift_total)

    # Fail the test if necessary
    if abs(drift) > drift_threshold:
        test.fail("Time drift too large: %.2f%%" % drift)
    if abs(drift_total) > drift_threshold_after_rest:
        test.fail("Time drift too large after rest period: %.2f%%" %
                  drift_total)
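The pass/fail criterion above reduces to one formula: drift is the mismatch between host and guest elapsed time, expressed as a percentage of elapsed host time. A tiny self-contained sketch with made-up readings (the values are illustrative, not test output):

def time_drift_percent(host_t0, host_t1, guest_t0, guest_t1):
    """Drift of the guest clock against the host clock, expressed as a
    percentage of the elapsed host time (the formula used above)."""
    host_delta = host_t1 - host_t0
    guest_delta = guest_t1 - guest_t0
    return 100.0 * (host_delta - guest_delta) / host_delta

# Example: the host advanced 60.0 s while the guest advanced 58.8 s:
# 100 * (60.0 - 58.8) / 60.0 = 2.0 (% drift; positive = guest ran slow)
assert abs(time_drift_percent(0.0, 60.0, 10.0, 68.8) - 2.0) < 1e-9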
Exemplo n.º 23
def run(test, params, env):
    """
    Test virsh cpu-stats command.

    The command can display domain per-CPU and total statistics.
    1. Call virsh cpu-stats [domain]
    2. Call virsh cpu-stats [domain] with valid options
    3. Call virsh cpu-stats [domain] with invalid options
    """

    if not virsh.has_help_command('cpu-stats'):
        test.cancel("This version of libvirt does not support "
                    "the cpu-stats test")

    vm_name = params.get("main_vm", "vm1")
    vm_ref = params.get("cpu_stats_vm_ref")
    status_error = params.get("status_error", "no")
    options = params.get("cpu_stats_options")
    error_msg = params.get("error_msg", "")
    logging.debug("options are %s", options)

    if vm_ref == "name":
        vm_ref = vm_name

    # get the number of online host CPUs
    cpus = cpu.online_cpus_count()
    logging.debug("host online cpu num is %s", cpus)

    # get options and put into a dict
    get_total = re.search('total', options)
    get_start = re.search('start', options)
    get_count = re.search('count', options)

    # command without options
    get_noopt = 0
    if not get_total and not get_start and not get_count:
        get_noopt = 1

    # command with only --total option
    get_totalonly = 0
    if not get_start and not get_count and get_total:
        get_totalonly = 1

    option_dict = {}
    if options.strip():
        option_list = options.split('--')
        logging.debug("option_list is %s", option_list)
        for match in option_list[1:]:
            if get_start or get_count:
                option_dict[match.split(' ')[0]] = match.split(' ')[1]

    # check whether the host has enough CPUs; if not, cancel the test
    if (status_error == "no"):
        cpu_start = int(option_dict.get("start", "0"))
        if cpu_start == 32:
            cpus = cpu.total_cpus_count()
            logging.debug("Host total cpu num: %s", cpus)
        if (cpu_start >= cpus):
            test.cancel("Host cpus are not enough")

    # Run virsh command
    cmd_result = virsh.cpu_stats(vm_ref,
                                 options,
                                 ignore_status=True,
                                 debug=True)
    output = cmd_result.stdout.strip()
    status = cmd_result.exit_status

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command! Output: {}".format(
                output))
        else:
            # Check error message is expected
            if not re.search(error_msg, cmd_result.stderr.strip()):
                test.fail("Error message is not expected! "
                          "Expected: {} Actual: {}".format(
                              error_msg, cmd_result.stderr.strip()))
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command! Error: {}".format(
                cmd_result.stderr.strip()))
        else:
            # Get cgroup cpu_time
            if not get_totalonly:
                vm = env.get_vm(vm_ref)
                cgpath = utils_cgroup.resolve_task_cgroup_path(
                    vm.get_pid(), "cpuacct")
                # When a VM has an 'emulator' child cgroup present, we must
                # strip off that suffix when detecting the cgroup for a machine
                if os.path.basename(cgpath) == "emulator":
                    cgpath = os.path.dirname(cgpath)
                usage_file = os.path.join(cgpath, "cpuacct.usage_percpu")
                with open(usage_file, 'r') as f:
                    cgtime = f.read().strip().split()
                logging.debug("cgtime get is %s", cgtime)

            # Cut CPUs from output and format to list
            output = re.sub(r'\.', '', output)
            if get_total:
                mt_start = re.search('Total', output).start()
            else:
                mt_start = len(output)
            output_cpus = " ".join(output[:mt_start].split())
            cpus_list = re.compile(r'CPU\d+:').split(output_cpus)

            # conditions that list total time info
            if get_noopt or get_total:
                mt_end = re.search('Total', output).end()
                total_list = output[mt_end + 1:].split()

                total_time = int(total_list[1])
                user_time = int(total_list[4])
                system_time = int(total_list[7])

                # check Total cpu_time > User + System cpu_time
                if user_time + system_time >= total_time:
                    test.fail("total cpu_time <= user_time + system_time")
                logging.debug(
                    "Check total cpu_time %d >= user + system "
                    "cpu_time %d", total_time, user_time + system_time)

            start_num = 0
            if get_start:
                start_num = int(option_dict["start"])

            end_num = int(cpus)
            if get_count:
                count_num = int(option_dict["count"])
                if end_num > start_num + count_num:
                    end_num = start_num + count_num

            # with only the --total option, just the "Total" cpu info is shown
            if get_totalonly:
                end_num = -1

            # find CPU[N] in output and sum the cpu_time and cgroup cpu_time
            sum_cputime = 0
            sum_cgtime = 0
            logging.debug("start_num %d, end_num %d", start_num, end_num)
            for i in range(start_num, end_num):
                if not re.search('CPU%d' % i, output):
                    test.fail("Failed to find CPU%d in result" % i)
                logging.debug("Check CPU%d exists", i)
                sum_cputime += int(cpus_list[i - start_num + 1].split()[1])
                sum_cgtime += int(cgtime[i])

            # check sum of cgroup cpu_time >= sum of output cpu_time
            if end_num >= 0:
                if sum_cputime > sum_cgtime:
                    test.fail("Check sum of cgroup cpu_time < sum "
                              "of output cpu_time")
                logging.debug("Check sum of cgroup cpu_time %d >= cpu_time %d",
                              sum_cgtime, sum_cputime)

            # check Total cpu_time >= sum of cpu_time when no options
            if get_noopt:
                if total_time < sum_cputime:
                    test.fail("total time < sum of output cpu_time")
                logging.debug(
                    "Check total time %d >= sum of output cpu_time"
                    " %d", total_time, sum_cputime)
Exemplo n.º 24
def run(test, params, env):
    """
    Test vcpu affinity feature as follows:
    positive test:
        1. use vcpu cpuset in xml to define vcpu affinity
        2. use cputune cpuset in xml to define vcpu affinity
        3. use offline-to-online host cpu as cpuset to run virsh vcpupin
        4. set vcpu placement in xml to auto and check xml result
        5. set vcpu cpuset in xml without placement defined and check xml result
    negative test:
        1. use an out-of-range cpuset as vcpu cpuset in xml to define vcpu affinity
        2. use an out-of-range cpuset as cputune cpuset in xml to define vcpu affinity
        3. use an invalid cpuset as cputune cpuset in xml to define vcpu affinity
        4. use a duplicate vcpu in xml to define vcpu affinity
        5. use an offline host cpu as cputune cpuset to run virsh vcpupin
        6. set vcpu affinity for a non-existent vcpu and check xml result
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cpuset_mask = params.get("cpuset_mask", "")
    vcpu = params.get("vcpu", "0")
    maxvcpu = params.get("maxvcpu", "8")
    current_vcpu = params.get("current_vcpu", "3")
    check = params.get("check", "")
    config_xml = params.get("config_xml", "")

    status_error = "yes" == params.get("status_error", "no")
    define_fail = "yes" == params.get("define_fail", "no")
    start_fail = "yes" == params.get("start_fail", "no")
    runtime_fail = "yes" == params.get("runtime_fail", "no")
    vm_down = "yes" == params.get("vm_down", "no")

    vcpu_cpuset = params.get("vcpu_cpuset", "")
    cputune_cpuset = params.get("cputune_cpuset", "")
    vcpu_placement = params.get("vcpu_placement", "static")
    err_msg = params.get("err_msg", "")
    start_timeout = int(params.get("start_timeout", "60"))
    offline_hostcpus = params.get("offline_hostcpus", "")
    machine_cpuset_path = params.get("machine_cpuset_path", "")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_affinity():
        """
        check vcpu affinity defined by vcpu cpuset or cputune cpuset
        """
        affinity = vcpu_cpuset if not cputune_cpuset else cputune_cpuset
        affinity = {vcpu: affinity}
        virsh.vcpuinfo(vm_name, debug=True)

        vmxml_live = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml_live)

        # if vcpu >= maxvcpu, the cputune should not exist in xml
        if int(vcpu) >= int(maxvcpu):
            try:
                if hasattr(vmxml_live, 'cputune'):
                    test.fail("cputune tag is set when vcpu >= maxvcpu")
            except xcepts.LibvirtXMLError:
                pass
        # check the expected vcpu affinity with the one got from the running vm
        elif not utils_hotplug.check_affinity(vm, affinity):
            test.fail("vcpu affinity check fail")

    try:
        hostcpu_num = int(cpu.total_cpus_count())

        # online all host cpus
        for x in range(hostcpu_num):
            if cpu.online(x):
                test.fail("fail to online cpu{}".format(x))

        # use vcpu cpuset or/and cputune cpuset to define xml
        del vmxml.cputune
        del vmxml.vcpus
        del vmxml.placement
        vmxml.vcpu = int(maxvcpu)
        vmxml.current_vcpu = current_vcpu

        # config vcpu cpuset for cpuset range test
        num = 1 if not status_error else 0
        cpuset_new = "0-{},^{}".format(hostcpu_num-num, cpuset_mask)
        if (config_xml == "vcpu" and check.endswith("range_cpuset")):
            vcpu_cpuset = cpuset_new
        vmxml.cpuset = vcpu_cpuset

        if vcpu_placement:
            vmxml.placement = vcpu_placement

        if config_xml == "cputune":
            cputune = vm_xml.VMCPUTuneXML()
            if check.endswith("range_cpuset"):
                cputune_cpuset = cpuset_new
            if check.endswith("duplicate_vcpu"):
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': "2"}, {'vcpu': vcpu, 'cpuset': "3"}]
            else:
                cputune.vcpupins = [{'vcpu': vcpu, 'cpuset': cputune_cpuset}]
            vmxml.cputune = cputune

        logging.debug(vmxml)
        if status_error and define_fail:
            result_to_check = virsh.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

        # start the vm
        if status_error and start_fail:
            result_to_check = virsh.start(vm_name, debug=True)

        if (not status_error) or runtime_fail:
            result_to_check = virsh.start(vm_name, debug=True, ignore_status=False)
            vm.wait_for_login(timeout=start_timeout).close()

            # test vcpu cpuset in offline/online host cpu scenario
            if check.endswith("offline_hostcpu"):
                if vm_down:
                    vm.shutdown()
                for x in offline_hostcpus.split(','):
                    if cpu.offline(x):
                        test.fail("fail to offline cpu{}".format(x))
                    logging.debug("offline host cpu {}".format(x))
                if vm_down:
                    vm.start()
                    vm.wait_for_login(timeout=start_timeout).close()
                if not status_error:
                    # online host cpu
                    if cpu.online(cputune_cpuset):
                        test.fail("fail to online cpu{}".format(cputune_cpuset))

            # run virsh vcpupin to config vcpu affinity
            if check.startswith("cputune") and (not config_xml):
                result_to_check = virsh.vcpupin(vm_name, vcpu, cputune_cpuset, debug=True)

            if check == "vcpu_placement":
                check_vcpu_placement(test, params)
            elif not status_error:
                check_vcpu_affinity()

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()

        # recover the host CPU environment
        for x in range(hostcpu_num):
            cpu.online(x)
        cmd = "echo '0-{}' > {}".format(hostcpu_num-1, machine_cpuset_path)
        process.run(cmd, shell=True)
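For reference, the cputune object assembled above (including the duplicate-vcpu negative case, where vcpu 0 is pinned twice) serializes to a small libvirt XML fragment. A hand-written sketch of that shape using the standard library, purely for illustration; the test itself goes through vm_xml.VMCPUTuneXML, not this helper:

import xml.etree.ElementTree as ET

def build_cputune(vcpupins):
    """Build a <cputune> fragment of the shape the test injects, e.g.
    [{'vcpu': '0', 'cpuset': '2'}] ->
    <cputune><vcpupin vcpu="0" cpuset="2" /></cputune>
    """
    cputune = ET.Element("cputune")
    for pin in vcpupins:
        ET.SubElement(cputune, "vcpupin",
                      vcpu=pin['vcpu'], cpuset=pin['cpuset'])
    return ET.tostring(cputune, encoding="unicode")

# Duplicate-vcpu case from the test above: vcpu 0 pinned to both 2 and 3
print(build_cputune([{'vcpu': '0', 'cpuset': '2'},
                     {'vcpu': '0', 'cpuset': '3'}]))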