def check_vm_cpu_model(vm_id, vcpu_model, expt_arch=None):
    """Verify the vcpu model applied to a vm via ps aux and virsh dumpxml.

    Args:
        vm_id (str): vm to check
        vcpu_model (str|None): expected vcpu model, e.g. 'Passthrough',
            'Haswell', or None/'' when no model was specified
        expt_arch (str|None): for Passthrough only - the originally recorded
            vcpu model; assert it is unchanged (e.g. after migration)

    Raises:
        AssertionError: if the model seen on the hypervisor or inside the vm
            does not match the expected model
    """
    if vcpu_model == 'Passthrough':
        pattern_ps = 'host'
        pattern_virsh = 'host-passthrough'
        virsh_tag = 'cpu'
        type_ = 'dict'
    elif vcpu_model:
        virsh_tag = 'cpu/model'
        type_ = 'text'
        if vcpu_model == 'Haswell':
            # Haswell may be reported with or without the -noTSX suffix
            pattern_ps = pattern_virsh = r'(haswell|haswell\-notsx)'
        else:
            pattern_ps = pattern_virsh = vcpu_model.lower()
    else:
        # vcpu model is not set
        pattern_ps = None
        pattern_virsh = None
        virsh_tag = 'cpu'
        type_ = 'dict'

    LOG.info(
        "Check vcpu model successfully applied to vm via ps aux and virsh dumpxml on vm host"
    )
    host = vm_helper.get_vm_host(vm_id)
    inst_name = vm_helper.get_vm_instance_name(vm_id)
    with host_helper.ssh_to_host(host) as host_ssh:
        output_ps = host_ssh.exec_cmd(
            "ps aux | grep --color='never' -i {}".format(vm_id),
            fail_ok=False)[1]
        output_virsh = host_helper.get_values_virsh_xmldump(
            inst_name, host_ssh, tag_paths=virsh_tag, target_type=type_)
        output_virsh = output_virsh[0]

    if vcpu_model:
        assert re.search(r'\s-cpu\s{}(\s|,)'.format(pattern_ps),
                         output_ps.lower()), \
            'cpu_model {} not found for vm {}'.format(pattern_ps, vm_id)
    else:
        assert '-cpu' not in output_ps, "cpu model is specified in ps aux"

    if vcpu_model == 'Passthrough':
        assert output_virsh['mode'] == 'host-passthrough', \
            'cpu mode is not passthrough in virsh for vm {}'.format(vm_id)

        LOG.info("Check cpu passthrough model from within the vm")
        vm_vcpu_model = vm_helper.get_vcpu_model(vm_id)
        host_cpu_model = host_helper.get_host_cpu_model(host=host)
        assert host_cpu_model == vm_vcpu_model, \
            "VM cpu model is different than host cpu model with cpu passthrough"

        if expt_arch:
            # Fixed: message previously formatted vcpu_model ('Passthrough')
            # as the current value; report the actual current model instead.
            assert expt_arch == vm_vcpu_model, \
                "VM cpu model changed. Original: {}. Current: {}".format(
                    expt_arch, vm_vcpu_model)
    elif vcpu_model:
        assert re.search(pattern_virsh, output_virsh.lower()), \
            'cpu model {} is not found in virsh for vm {}'.format(
                pattern_virsh, vm_id)
    else:
        assert output_virsh == {}, \
            "Virsh cpu output: {}".format(output_virsh)
        vm_vcpu_model = vm_helper.get_vcpu_model(vm_id)
        assert 'QEMU Virtual CPU' in vm_vcpu_model, \
            "vCPU model is not QEMU Virtual CPU when unspecified"
def check_host_file_for_vm(vm_id, expecting=True, host=None, fail_ok=True):
    """Check whether the vTPM backing file for a vm exists on its hosting node.

    Args:
        vm_id (str): vm to check
        expecting (bool): whether the vTPM file is expected to exist
        host (str|None): host to check on; defaults to the vm's current host
        fail_ok (bool): if False, assert when the actual state differs from
            the expected state

    Returns (tuple): (<file_found> (bool), <expecting> (bool))
    """
    LOG.info('Verify the file for vTPM exists on the hosting node for VM:' +
             vm_id)
    if host is None:
        host = vm_helper.get_vm_host(vm_id)

    instance_name = vm_helper.get_vm_instance_name(vm_id)
    vtpm_file = vtpm_base_dir.format(
        vm_id=vm_id, instance_name=instance_name) + '/' + vtpm_file_name

    # Fixed: the previous comparison against the active controller name was a
    # no-op (both branches resolved to the vm host), so connect to it directly.
    with host_helper.ssh_to_host(host) as ssh_client:
        if ssh_client.file_exists(vtpm_file):
            LOG.info('OK, found the file for vTPM:{} on host:{}'.format(
                vtpm_file, host))
            assert expecting is True or fail_ok is True, \
                'FAIL, the files supporting vTPM are NOT found on the {} as expected'.format(host)

            if expecting is True:
                LOG.info('-this is expected')
            else:
                LOG.info('-this is NOT expected')

            return True, expecting
        else:
            LOG.info('Cannot find the file for vTPM:{} on host:{}'.format(
                vtpm_file, host))
            assert expecting is False or fail_ok is True, \
                'FAIL, the files should be cleared as expected'

            if expecting is False:
                LOG.info('-this is expected')
            else:
                LOG.info('-this is NOT expected')

            return False, expecting
def _check_vm_topology_on_host(vm_id, vcpus, vm_host, cpu_pol, cpu_thr_pol,
                               host_log_core_siblings=None, shared_vcpu=None,
                               shared_host_cpus=None):
    """Verify a vm's cpu topology on its host via virsh vcpupin and taskset.

    Args:
        vm_id (str):
        vcpus (int): number of vcpus of the vm
        vm_host (str): host the vm runs on
        cpu_pol (str): cpu policy, e.g. 'dedicated'
        cpu_thr_pol (str): cpu thread policy, e.g. 'isolate', 'require'
        host_log_core_siblings (list|None): host sibling core pairs; looked up
            if not provided
        shared_vcpu (int|None): index of the shared vcpu, if any
        shared_host_cpus (None|list): host cpus configured as shared; looked
            up if not provided

    Returns (tuple): (<vm_host_cpus> (list), <vm_siblings> (list|None))
    """
    if not host_log_core_siblings:
        host_log_core_siblings = host_helper.get_logcore_siblings(
            host=vm_host)

    # Fixed: use 'is not None' so a shared vcpu index of 0 still triggers the
    # shared-host-cpu lookup (0 is falsy but is a valid vcpu index).
    if shared_vcpu is not None and not shared_host_cpus:
        shared_cpus_ = host_helper.get_host_cpu_cores_for_function(
            func='Shared', hostname=vm_host, thread=None)
        shared_host_cpus = []
        for proc, shared_cores in shared_cpus_.items():
            shared_host_cpus += shared_cores

    LOG.info(
        '======= Check vm topology from vm_host via: virsh vcpupin, taskset')
    instance_name = vm_helper.get_vm_instance_name(vm_id)

    with host_helper.ssh_to_host(vm_host) as host_ssh:
        vcpu_cpu_map = vm_helper.get_vcpu_cpu_map(host_ssh=host_ssh)
        used_host_cpus = []
        vm_host_cpus = []
        vcpus_list = list(range(vcpus))
        for instance_name_, instance_map in vcpu_cpu_map.items():
            used_host_cpus += list(instance_map.values())
            if instance_name_ == instance_name:
                for vcpu in vcpus_list:
                    vm_host_cpus.append(instance_map[vcpu])
        used_host_cpus = list(set(used_host_cpus))
        vm_siblings = None
        # Check vm sibling pairs
        if 'ded' in cpu_pol and cpu_thr_pol in ('isolate', 'require'):
            if len(host_log_core_siblings[0]) == 1:
                # Non-hyperthreaded host: every vcpu is its own "sibling set"
                assert cpu_thr_pol != 'require', \
                    "cpu_thread_policy 'require' must be used on a HT host"
                vm_siblings = [[vcpu_] for vcpu_ in vcpus_list]
            else:
                vm_siblings = []
                for vcpu_index in vcpus_list:
                    vm_host_cpu = vm_host_cpus[vcpu_index]
                    for host_sibling in host_log_core_siblings:
                        if vm_host_cpu in host_sibling:
                            other_cpu = host_sibling[0] if \
                                vm_host_cpu == host_sibling[1] else \
                                host_sibling[1]
                            if cpu_thr_pol == 'require':
                                # 'require' vm must own both threads of a core
                                assert other_cpu in vm_host_cpus, \
                                    "'require' vm uses only 1 of the sibling " \
                                    "cores"
                                vm_siblings.append(
                                    sorted([
                                        vcpu_index,
                                        vm_host_cpus.index(other_cpu)
                                    ]))
                            else:
                                # 'isolate' vm must leave the sibling unused
                                assert other_cpu not in used_host_cpus, \
                                    "sibling core was not reserved for " \
                                    "'isolate' vm"
                                vm_siblings.append([vcpu_index])

        LOG.info("{}Check vcpus for vm via sudo virsh vcpupin".format(SEP))
        vcpu_pins = host_helper.get_vcpu_pins_for_instance_via_virsh(
            host_ssh=host_ssh, instance_name=instance_name)
        assert vcpus == len(vcpu_pins), \
            'Actual vm cpus number - {} is not as expected - {} in sudo ' \
            'virsh vcpupin'.format(len(vcpu_pins), vcpus)

        virsh_cpus_sets = []
        for vcpu_pin in vcpu_pins:
            vcpu = int(vcpu_pin['vcpu'])
            cpu_set = common.parse_cpus_list(vcpu_pin['cpuset'])
            virsh_cpus_sets += cpu_set
            if shared_vcpu is not None and vcpu == shared_vcpu:
                assert len(cpu_set) == 1, \
                    "shared vcpu is pinned to more than 1 host cpu"
                assert cpu_set[0] in shared_host_cpus, \
                    "shared vcpu is not pinned to shared host cpu"

        if 'ded' in cpu_pol:
            assert set(vm_host_cpus) == set(
                virsh_cpus_sets), "pinned cpus in virsh cpupin is not the " \
                                  "same as ps"
        else:
            # floating vm: pinned set must be a strict superset of used cpus
            assert set(vm_host_cpus) < set(
                virsh_cpus_sets), "floating vm should be affined to all " \
                                  "available host cpus"

        LOG.info("{}Get cpu affinity list for vm via taskset -pc".format(SEP))
        ps_affined_cpus = \
            vm_helper.get_affined_cpus_for_vm(vm_id,
                                              host_ssh=host_ssh,
                                              vm_host=vm_host,
                                              instance_name=instance_name)
        assert set(ps_affined_cpus) == set(
            virsh_cpus_sets), "Actual affined cpu in taskset is different " \
                              "than virsh"

    return vm_host_cpus, vm_siblings
def test_vm_with_config_drive(hosts_per_stor_backing):
    """
    Skip Condition:
        - no host with local_image backend

    Test Steps:
        - Launch a vm using config drive
        - Add test data to config drive on vm
        - Do some operations (reboot vm for simplex, cold migrate and lock
          host for non-simplex) and check test data persisted in config
          drive after each operation

    Teardown:
        - Delete created vm, volume, flavor
    """
    guest_os = 'cgcs-guest'
    # NOTE: 'tis-centos-guest' disabled pending CGTS-6782
    img_id = glance_helper.get_guest_image(guest_os)

    local_image_hosts = hosts_per_stor_backing.get('local_image', [])
    if len(local_image_hosts) < 1:
        skip("No host with local_image storage backing")

    volume_id = cinder_helper.create_volume(name='vol_inst1',
                                            source_id=img_id,
                                            guest_image=guest_os)[1]
    ResourceCleanup.add('volume', volume_id, scope='function')

    block_device = {
        'source': 'volume',
        'dest': 'volume',
        'id': volume_id,
        'device': 'vda',
    }
    vm_id = vm_helper.boot_vm(name='config_drive',
                              config_drive=True,
                              block_device=block_device,
                              cleanup='function',
                              guest_os=guest_os,
                              meta={'foo': 'bar'})[1]

    LOG.tc_step("Confirming the config drive is set to True in vm ...")
    config_drive_value = str(vm_helper.get_vm_values(vm_id, "config_drive")[0])
    assert config_drive_value == 'True', "vm config-drive not true"

    LOG.tc_step("Add date to config drive ...")
    check_vm_config_drive_data(vm_id)

    vm_host = vm_helper.get_vm_host(vm_id)
    instance_name = vm_helper.get_vm_instance_name(vm_id)

    LOG.tc_step("Check config_drive vm files on hypervisor after vm launch")
    check_vm_files_on_hypervisor(vm_id, vm_host=vm_host,
                                 instance_name=instance_name)

    if not system_helper.is_aio_simplex():
        # Multi-node lab: exercise cold migration and host lock
        LOG.tc_step("Cold migrate VM")
        vm_helper.cold_migrate_vm(vm_id)

        LOG.tc_step("Check config drive after cold migrate VM...")
        check_vm_config_drive_data(vm_id)

        LOG.tc_step("Lock the compute host")
        compute_host = vm_helper.get_vm_host(vm_id)
        HostsToRecover.add(compute_host)
        host_helper.lock_host(compute_host, swact=True)

        LOG.tc_step("Check config drive after locking VM host")
        check_vm_config_drive_data(vm_id, ping_timeout=VMTimeout.DHCP_RETRY)
        # vm may have evacuated to a different host after the lock
        vm_host = vm_helper.get_vm_host(vm_id)
    else:
        # Simplex lab: a vm reboot is the only available nova operation
        LOG.tc_step("Reboot vm")
        vm_helper.reboot_vm(vm_id)

        LOG.tc_step("Check config drive after vm rebooted")
        check_vm_config_drive_data(vm_id)

    LOG.tc_step("Check vm files exist after nova operations")
    check_vm_files_on_hypervisor(vm_id, vm_host=vm_host,
                                 instance_name=instance_name)