def _check_vm_topology_on_host(vm_id, vcpus, vm_host, cpu_pol, cpu_thr_pol, host_log_core_siblings=None, shared_vcpu=None, shared_host_cpus=None):
    """Verify a VM's vcpu-to-host-cpu pinning from the compute host's side.

    Cross-checks three host-side views of the pinning: the libvirt vcpu/cpu
    map, ``sudo virsh vcpupin``, and the process affinity from ``taskset``.
    Also derives the VM's sibling-pair layout for dedicated cpu policies.

    Args:
        vm_id (str): VM to check.
        vcpus (int): expected number of vcpus for the VM.
        vm_host (str): hypervisor hosting the VM.
        cpu_pol (str): cpu policy; a value containing 'ded' means dedicated.
        cpu_thr_pol (str): cpu thread policy ('isolate', 'require', ...).
        host_log_core_siblings (list|None): host sibling-core pairs; queried
            from the host when not provided.
        shared_vcpu (int|None): vcpu id expected to be pinned to a shared
            host cpu, if any.
        shared_host_cpus (None|list): shared host cpus; queried when a
            shared_vcpu is given but this is None.

    Returns:
        tuple: (vm_host_cpus, vm_siblings) where vm_host_cpus is the list of
        host cpus this VM's vcpus are pinned to (indexed by vcpu id), and
        vm_siblings is the derived sibling grouping (None unless a dedicated
        policy with 'isolate'/'require' thread policy is in effect).

    Raises:
        AssertionError: if any pinning/sibling check fails.
    """
    if not host_log_core_siblings:
        host_log_core_siblings = host_helper.get_logcore_siblings(host=vm_host)

    if shared_vcpu and not shared_host_cpus:
        # Flatten per-processor shared-core lists into one list of host cpus.
        shared_cpus_ = host_helper.get_host_cpu_cores_for_function(
            func='Shared', hostname=vm_host, thread=None)
        shared_host_cpus = []
        for proc, shared_cores in shared_cpus_.items():
            shared_host_cpus += shared_cores

    LOG.info(
        '======= Check vm topology from vm_host via: virsh vcpupin, taskset')
    instance_name = vm_helper.get_vm_instance_name(vm_id)

    with host_helper.ssh_to_host(vm_host) as host_ssh:
        vcpu_cpu_map = vm_helper.get_vcpu_cpu_map(host_ssh=host_ssh)
        used_host_cpus = []
        vm_host_cpus = []
        vcpus_list = list(range(vcpus))
        # used_host_cpus collects cpus pinned by ALL instances on the host;
        # vm_host_cpus collects only this VM's pinned cpus, ordered by vcpu id.
        for instance_name_, instance_map in vcpu_cpu_map.items():
            used_host_cpus += list(instance_map.values())
            if instance_name_ == instance_name:
                for vcpu in vcpus_list:
                    vm_host_cpus.append(instance_map[vcpu])
        used_host_cpus = list(set(used_host_cpus))
        vm_siblings = None
        # Check vm sibling pairs
        if 'ded' in cpu_pol and cpu_thr_pol in ('isolate', 'require'):
            if len(host_log_core_siblings[0]) == 1:
                # Non-hyperthreaded host: each "sibling group" is one core.
                assert cpu_thr_pol != 'require', \
                    "cpu_thread_policy 'require' must be used on a HT host"
                vm_siblings = [[vcpu_] for vcpu_ in vcpus_list]
            else:
                vm_siblings = []
                for vcpu_index in vcpus_list:
                    vm_host_cpu = vm_host_cpus[vcpu_index]
                    for host_sibling in host_log_core_siblings:
                        if vm_host_cpu in host_sibling:
                            # other_cpu is the HT sibling of this vcpu's cpu.
                            other_cpu = host_sibling[0] if \
                                vm_host_cpu == host_sibling[1] else \
                                host_sibling[1]
                            if cpu_thr_pol == 'require':
                                # 'require': both siblings of a core must be
                                # used by this VM; record the vcpu pair.
                                assert other_cpu in vm_host_cpus, \
                                    "'require' vm uses only 1 of the sibling " \
                                    "cores"
                                vm_siblings.append(
                                    sorted([
                                        vcpu_index,
                                        vm_host_cpus.index(other_cpu)
                                    ]))
                            else:
                                # 'isolate': the sibling must be left unused
                                # by every instance on the host.
                                assert other_cpu not in used_host_cpus, \
                                    "sibling core was not reserved for " \
                                    "'isolate' vm"
                                vm_siblings.append([vcpu_index])

        LOG.info("{}Check vcpus for vm via sudo virsh vcpupin".format(SEP))
        vcpu_pins = host_helper.get_vcpu_pins_for_instance_via_virsh(
            host_ssh=host_ssh, instance_name=instance_name)
        assert vcpus == len(vcpu_pins), \
            'Actual vm cpus number - {} is not as expected - {} in sudo ' \
            'virsh vcpupin'.format(len(vcpu_pins), vcpus)

        # Accumulate every host cpu that appears in any vcpupin cpuset.
        virsh_cpus_sets = []
        for vcpu_pin in vcpu_pins:
            vcpu = int(vcpu_pin['vcpu'])
            cpu_set = common.parse_cpus_list(vcpu_pin['cpuset'])
            virsh_cpus_sets += cpu_set
            if shared_vcpu is not None and vcpu == shared_vcpu:
                # The shared vcpu must be pinned to exactly one shared cpu.
                assert len(cpu_set) == 1, \
                    "shared vcpu is pinned to more than 1 host cpu"
                assert cpu_set[0] in shared_host_cpus, \
                    "shared vcpu is not pinned to shared host cpu"

        if 'ded' in cpu_pol:
            # Dedicated policy: virsh pinning must match the cpu map exactly.
            assert set(vm_host_cpus) == set(
                virsh_cpus_sets), "pinned cpus in virsh cpupin is not the " \
                                  "same as ps"
        else:
            # Floating policy: cpu map is a strict subset of the full range.
            assert set(vm_host_cpus) < set(
                virsh_cpus_sets), "floating vm should be affined to all " \
                                  "available host cpus"

        LOG.info("{}Get cpu affinity list for vm via taskset -pc".format(SEP))
        ps_affined_cpus = \
            vm_helper.get_affined_cpus_for_vm(vm_id,
                                              host_ssh=host_ssh,
                                              vm_host=vm_host,
                                              instance_name=instance_name)
        # taskset view must agree with the virsh vcpupin view.
        assert set(ps_affined_cpus) == set(
            virsh_cpus_sets), "Actual affined cpu in taskset is different " \
                              "than virsh"
    return vm_host_cpus, vm_siblings
def _check_vm_topology_on_vm(vm_id, vcpus, siblings_total, current_vcpus=None,
                             prev_siblings=None, guest=None,
                             check_sibling=True):
    """Verify cpu topology as seen from inside the guest.

    Checks present/online/offline core counts (via /sys/devices/system/cpu
    for Linux guests, or wmic for Windows guests) against the expected vcpu
    counts, and optionally checks thread-sibling grouping.

    Args:
        vm_id (str): VM to check.
        vcpus (int): expected number of present (provisioned) vcpus.
        siblings_total (list|None): expected sibling groups (list of lists of
            vcpu ids); deep-copied so the caller's list is not mutated.
        current_vcpus (int|None): expected online vcpu count; defaults to
            ``vcpus`` (e.g. differs after cpu scaling).
        prev_siblings (list|None): sibling grouping prior to a live action
            (e.g. migration); accepted as an alternative expected grouping.
        guest (str|None): guest image name; a value containing 'win' selects
            the Windows checking path.
        check_sibling (bool): whether to verify sibling grouping (only done
            when all present cores are online).

    Raises:
        AssertionError: if any count or sibling check fails.
    """
    siblings_total_ = None
    if siblings_total:
        # Deep-copy so in-place sorting/append below can't affect the caller.
        siblings_total_ = copy.deepcopy(siblings_total)
    # Check from vm in /proc/cpuinfo and
    # /sys/devices/.../cpu#/topology/thread_siblings_list
    if not guest:
        guest = ''
    if not current_vcpus:
        current_vcpus = int(vcpus)
    LOG.info(
        '=== Check vm topology from within the vm via: /sys/devices/system/cpu'
    )
    actual_sibs = []
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        win_expt_cores_per_sib = win_log_count_per_sibling = None
        if 'win' in guest:
            LOG.info(
                "{}Check windows guest cores via wmic cpu get cmds".format(
                    SEP))
            # Windows path: no offline-core concept exposed here; treat all
            # logical cores as online and present.
            offline_cores_count = 0
            log_cores_count, win_log_count_per_sibling = \
                get_procs_and_siblings_on_windows(vm_ssh)
            online_cores_count = present_cores_count = log_cores_count
        else:
            LOG.info(
                "{}Check vm present|online|offline cores from inside vm via "
                "/sys/devices/system/cpu/".format(SEP))
            present_cores, online_cores, offline_cores = \
                vm_helper.get_proc_nums_from_vm(vm_ssh)
            present_cores_count = len(present_cores)
            online_cores_count = len(online_cores)
            offline_cores_count = len(offline_cores)

        assert vcpus == present_cores_count, \
            "Number of vcpus: {}, present cores: {}".format(
                vcpus, present_cores_count)
        assert current_vcpus == online_cores_count, \
            "Current vcpus for vm: {}, online cores: {}".format(
                current_vcpus, online_cores_count)

        expt_total_cores = online_cores_count + offline_cores_count
        # NOTE(review): 512 appears to be an accepted special total — confirm
        # why; also the first format arg is `vcpus`, though the message says
        # "present cores" (equal here due to the assert above).
        assert expt_total_cores in [present_cores_count, 512], \
            "Number of present cores: {}. online+offline cores: {}".format(
                vcpus, expt_total_cores)

        # Sibling check only makes sense when no cores are offlined.
        if check_sibling and siblings_total_ and online_cores_count == \
                present_cores_count:
            # NOTE(review): the `if not siblings_total_` arm is unreachable
            # here since this branch requires siblings_total_ to be truthy.
            expt_sibs_list = [[vcpu] for vcpu in
                              range(present_cores_count)] if not \
                siblings_total_ \
                else siblings_total_

            expt_sibs_list = [sorted(expt_sibs_list)]
            if prev_siblings:
                # siblings_total may get modified here
                expt_sibs_list.append(sorted(prev_siblings))

            if 'win' in guest:
                LOG.info("{}Check windows guest siblings via wmic cpu get "
                         "cmds".format(SEP))
                expt_cores_list = []
                for sib_list in expt_sibs_list:
                    # NOTE(review): `vcpus` here shadows the int parameter of
                    # the same name (safe: parameter not used again after).
                    win_expt_cores_per_sib = [len(vcpus) for vcpus in
                                              sib_list]
                    expt_cores_list.append(win_expt_cores_per_sib)
                assert win_log_count_per_sibling in expt_cores_list, \
                    "Expected log cores count per sibling: {}, actual: {}".\
                    format(win_expt_cores_per_sib, win_log_count_per_sibling)
            else:
                LOG.info("{}Check vm /sys/devices/system/cpu/["
                         "cpu#]/topology/thread_siblings_list".format(SEP))
                for cpu in [
                    'cpu{}'.format(i) for i in range(online_cores_count)
                ]:
                    actual_sibs_for_cpu = \
                        vm_ssh.exec_cmd(
                            'cat /sys/devices/system/cpu/{}/topology/thread_'
                            'siblings_list'.format(cpu), fail_ok=False)[1]

                    # Collect each distinct sibling group exactly once.
                    sib_for_cpu = common.parse_cpus_list(actual_sibs_for_cpu)
                    if sib_for_cpu not in actual_sibs:
                        actual_sibs.append(sib_for_cpu)

                assert sorted(
                    actual_sibs) in expt_sibs_list, "Expt sib lists: {}, " \
                                                    "actual sib list: {}". \
                    format(expt_sibs_list, sorted(actual_sibs))
def check_rt_and_ord_cpus_via_virsh_and_ps(vm_id, vcpus, expt_rt_cpus, expt_ord_cpus, shared_vcpu=None,
                                           offline_cpus=None, check_virsh_vcpusched=True):
    """Verify realtime vs ordinary vcpu configuration via virsh dumpxml and ps.

    Cross-checks the VM's cputune XML (vcpupin / emulatorpin / vcpusched)
    against expectations, then confirms via ps that realtime vcpu threads run
    with FIFO priority 1 and ordinary vcpu threads with the default TS policy.

    Args:
        vm_id (str): VM to check.
        vcpus (int): expected total number of vcpus.
        expt_rt_cpus (list): vcpu ids expected to be realtime.
        expt_ord_cpus (list): vcpu ids expected to be ordinary.
        shared_vcpu (int|None): vcpu expected to host the emulator threads,
            if a shared vcpu is configured.
        offline_cpus (int|list|None): realtime vcpus expected offline; their
            threads are expected to show TS policy instead of FF.
        check_virsh_vcpusched (bool): whether to validate the vcpusched
            elements in the XML.

    Raises:
        AssertionError: if any virsh or ps check fails.
    """
    LOG.tc_step("Check realtime and ordinary cpu info via virsh and ps")
    inst_name, vm_host = vm_helper.get_vm_values(vm_id, fields=[":instance_name", ":host"], strict=False)

    with host_helper.ssh_to_host(hostname=vm_host) as host_ssh:
        LOG.info("------ Check vcpusched, emulatorpin, and vcpupin in virsh dumpxml")
        vcpupins, emulatorpins, vcpuscheds = host_helper.get_values_virsh_xmldump(
            instance_name=inst_name, host_ssh=host_ssh, target_type='dict',
            tag_paths=('cputune/vcpupin', 'cputune/emulatorpin', 'cputune/vcpusched'))

        # Each vcpu should have its own vcpupin entry in virsh dumpxml
        assert vcpus == len(vcpupins), "vcpupin entries count in virsh dumpxml is not the same as vm vcpus count"

        LOG.info("------ Check realtime cpu count is same as specified in flavor and with fifo 1 policy")
        if check_virsh_vcpusched:
            if not expt_rt_cpus:
                assert not vcpuscheds, "vcpushed exists in virsh dumpxml when realtime_cpu != yes"
            else:
                LOG.info("------ Check vcpusched for realtime cpus")
                virsh_rt_cpus = []
                for vcpusched in vcpuscheds:
                    virsh_scheduler = vcpusched['scheduler']
                    virsh_priority = vcpusched['priority']
                    assert 'fifo' == virsh_scheduler, "Actual shed policy in virsh dumpxml: {}".format(virsh_scheduler)
                    # Bug fix: message previously formatted virsh_scheduler,
                    # hiding the actual offending priority value on failure.
                    assert '1' == virsh_priority, "Actual priority in virsh dumpxml: {}".format(virsh_priority)

                    virsh_rt_cpu = int(vcpusched['vcpus'])
                    virsh_rt_cpus.append(virsh_rt_cpu)
                assert sorted(expt_rt_cpus) == sorted(virsh_rt_cpus), \
                    "Expected rt cpus: {}; Actual in virsh vcpusched: {}".format(expt_rt_cpus, virsh_rt_cpus)

        LOG.info("------ Check emulator cpus is a subset of ordinary cpus")
        emulator_cpusets_str = emulatorpins[0]['cpuset']
        emulator_cpusets = common.parse_cpus_list(emulator_cpusets_str)

        cpuset_dict = {}
        virsh_ord_cpus = []
        ord_cpusets = []
        rt_cpusets = []
        emulator_cpus = []
        for vcpupin in vcpupins:
            cpuset = int(vcpupin['cpuset'])
            vcpu_id = int(vcpupin['vcpu'])
            if cpuset in emulator_cpusets:
                # Don't include vcpu_id in case of scaled-down vm. Example:
                # <cputune>
                #     <shares>3072</shares>
                #     <vcpupin vcpu='0' cpuset='25'/>
                #     <vcpupin vcpu='1' cpuset='5'/>
                #     <vcpupin vcpu='2' cpuset='25'/>
                #     <emulatorpin cpuset='5,25'/>
                #     <vcpusched vcpus='2' scheduler='fifo' priority='1'/>
                # </cputune>
                if cpuset not in list(cpuset_dict.values()):
                    emulator_cpus.append(vcpu_id)
            cpuset_dict[vcpu_id] = cpuset
            # Classify each vcpu (and its host cpuset) as realtime or ordinary.
            if vcpu_id in expt_rt_cpus:
                rt_cpusets.append(cpuset)
            else:
                virsh_ord_cpus.append(vcpu_id)
                ord_cpusets.append(cpuset)

        LOG.info("cpuset dict: {}".format(cpuset_dict))
        assert sorted(expt_ord_cpus) == sorted(virsh_ord_cpus), \
            "expected ordinary cpus: {}; Actual in virsh vcpupin: {}".format(expt_ord_cpus, virsh_ord_cpus)

        if shared_vcpu is not None:
            # With a shared vcpu configured, emulator threads pin to it alone.
            assert emulator_cpus == [shared_vcpu], "Emulator cpu is not the shared vcpu"
        else:
            if expt_rt_cpus:
                assert sorted(emulator_cpus) == sorted(expt_ord_cpus), "Emulator cpus is not a subset of ordinary cpus"
            else:
                assert set(emulator_cpus) <= set(expt_ord_cpus), "Emulator cpu is not a subset of ordinary cpus when " \
                                                                 "no realtime cpu or shared cpu set"

        comm_pattern = 'CPU [{}]/KVM'
        LOG.info("------ Check actual vm realtime cpu scheduler via ps")
        rt_comm = comm_pattern.format(','.join([str(vcpu) for vcpu in expt_rt_cpus]))
        vm_pid = vm_helper.get_vm_pid(instance_name=inst_name, host_ssh=host_ssh)
        ps_rt_scheds = vm_helper.get_sched_policy_and_priority_for_vcpus(vm_pid, host_ssh, cpusets=rt_cpusets,
                                                                         comm=rt_comm)
        assert len(expt_rt_cpus) == len(ps_rt_scheds)
        for ps_rt_sched in ps_rt_scheds:
            ps_rt_pol, ps_rt_prio, ps_rt_comm = ps_rt_sched
            # Realtime vcpu threads are expected FIFO ('FF') priority 1,
            # unless the vcpu has been offlined (then default 'TS', no prio).
            expt_pol = 'FF'
            expt_prio = '1'
            if offline_cpus:
                if isinstance(offline_cpus, int):
                    offline_cpus = [offline_cpus]
                cpu = int(re.findall(r'(\d+)/KVM', ps_rt_comm)[0])
                if cpu in offline_cpus:
                    expt_pol = 'TS'
                    expt_prio = '-'

            assert ps_rt_pol == expt_pol, \
                "Actual sched policy: {}. ps_rt_scheds parsed: {}".format(ps_rt_pol, ps_rt_scheds)
            # Bug fix: message previously formatted ps_rt_pol instead of the
            # actual priority value being asserted on.
            assert ps_rt_prio == expt_prio, \
                "Actual priority: {}. ps_rt_scheds parsed: {}".format(ps_rt_prio, ps_rt_scheds)

        LOG.info("------ Check actual vm ordinary cpu scheduler via ps")
        ord_comm = comm_pattern.format(','.join([str(vcpu) for vcpu in expt_ord_cpus]))
        ps_ord_scheds = vm_helper.get_sched_policy_and_priority_for_vcpus(vm_pid, host_ssh, cpusets=ord_cpusets,
                                                                         comm=ord_comm)
        for ps_ord_sched in ps_ord_scheds:
            ps_ord_pol, ps_ord_prio, ps_ord_comm = ps_ord_sched
            assert ps_ord_pol == 'TS' and ps_ord_prio == '-', "ps_ord_scheds parsed: {}".format(ps_ord_scheds)