def _boot_migrable_vms(storage_backing):
    """
    Launch a set of vms on the given storage backing that support live
    migration.

    Args:
        storage_backing: 'local_image' or 'remote'

    Returns (tuple):
        - vms_info (list): [(vm_id, block_mig), ...] where block_mig is
          True when block migration applies to that vm
        - flavors_created (list): ids of the flavors created here
    """
    launched = []
    flavors = []

    # Flavor with no ephemeral/swap disks so vms stay live-migratable
    flv_no_local = nova_helper.create_flavor(
        ephemeral=0, swap=0, storage_backing=storage_backing)[1]
    flavors.append(flv_no_local)

    # Volume-backed vm: block migration does not apply
    vol_vm = vm_helper.boot_vm(flavor=flv_no_local, source='volume')[1]
    launched.append((vol_vm, False))

    LOG.info(
        "Boot a VM from image if host storage backing is local_image or "
        "remote...")
    # Image-backed vm: block migration applies
    img_vm = vm_helper.boot_vm(flavor=flv_no_local, source='image')[1]
    launched.append((img_vm, True))

    if storage_backing == 'remote':
        LOG.info(
            "Boot a VM from volume with local disks if storage backing is "
            "remote...")
        eph, swap = random.choice([[0, 512], [1, 512], [1, 0]])
        flv_local = nova_helper.create_flavor(ephemeral=eph, swap=swap)[1]
        flavors.append(flv_local)
        local_vm = vm_helper.boot_vm(flavor=flv_local, source='volume')[1]
        launched.append((local_vm, False))

        LOG.info(
            "Boot a VM from image with volume attached if storage backing "
            "is remote...")
        attached_vm = vm_helper.boot_vm(flavor=flv_no_local,
                                        source='image')[1]
        vm_helper.attach_vol_to_vm(vm_id=attached_vm)
        launched.append((attached_vm, False))

    return launched, flavors
def _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol, vcpus,
                        vm_type):
    """
    Create a flavor per the given specs and boot the vm under test from it.

    Args:
        storage_backing (str): storage backing for the flavor
        ephemeral (int): ephemeral disk size in GiB
        swap (int): swap disk size in MiB
        cpu_pol (str|None): cpu policy extra spec; skipped when None
        vcpus (int): number of vcpus for the flavor
        vm_type (str): 'volume', 'image', or 'image_with_vol'

    Returns (str): id of the booted vm
    """
    # Fix: log message typo "ephemera disk" -> "ephemeral disk"
    LOG.tc_step(
        "Create a flavor with {} vcpus, {}G ephemeral disk, {}M swap "
        "disk".format(vcpus, ephemeral, swap))
    flavor_id = nova_helper.create_flavor(
        name='migration_test', ephemeral=ephemeral, swap=swap, vcpus=vcpus,
        storage_backing=storage_backing, cleanup='function')[1]

    if cpu_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: cpu_pol}
        LOG.tc_step("Add following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor=flavor_id, **specs)

    boot_source = 'volume' if vm_type == 'volume' else 'image'
    LOG.tc_step("Boot a vm from {}".format(boot_source))
    vm_id = vm_helper.boot_vm('migration_test', flavor=flavor_id,
                              source=boot_source, reuse_vol=False,
                              cleanup='function')[1]

    if vm_type == 'image_with_vol':
        LOG.tc_step("Attach volume to vm")
        # mount=False: volume is attached but left unmounted in the guest
        vm_helper.attach_vol_to_vm(vm_id=vm_id, mount=False)

    return vm_id
def create_rt_vm(hypervisor):
    """
    Boot a realtime guest vm on the targeted hypervisor.

    Args:
        hypervisor (str): hypervisor to boot on; must be a key of the
            module-level testable_hypervisors dict

    Returns (tuple): (vm_id, vcpu_count, non_rt_core)
    """
    global testable_hypervisors
    LOG.tc_step('Create/get glance image using rt guest image')
    image_id = glance_helper.get_guest_image(guest_os='tis-centos-guest-rt',
                                             cleanup='module')

    vcpu_count = VM_CPU_NUM
    non_rt_core = 0
    LOG.tc_step(
        'Create a flavor with specified cpu model, cpu policy, realtime mask, and 2M pagesize')
    flavor_id, storage_backing = nova_helper.create_flavor(
        ram=1024, vcpus=vcpu_count, root_disk=2,
        storage_backing='local_image')[1:3]
    cpu_info = dict(testable_hypervisors[hypervisor]['cpu_info'])
    # Pin the vcpu model to the host's own cpu model and mark core 0 as
    # the single non-realtime core via the realtime mask
    nova_helper.set_flavor(flavor_id, **{
        FlavorSpec.VCPU_MODEL: cpu_info['model'],
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.CPU_REALTIME: 'yes',
        FlavorSpec.CPU_REALTIME_MASK: '^{}'.format(non_rt_core),
        FlavorSpec.MEM_PAGE_SIZE: 2048,
    })

    LOG.tc_step(
        'Boot a VM with rt flavor and image on the targeted hypervisor: {}'.
        format(hypervisor))
    vm_id = vm_helper.boot_vm(flavor=flavor_id, source='image',
                              source_id=image_id, vm_host=hypervisor,
                              cleanup='function')[1]

    return vm_id, vcpu_count, non_rt_core
def test_timing():
    """
    Compare wall-clock time of booting vms concurrently via threads
    against booting them one at a time in a loop; results are logged.
    """
    flav_id = nova_helper.create_flavor('thread_testing')[1]
    ResourceCleanup.add(resource_type='flavor', resource_id=flav_id)

    workers = []
    start_1 = time()
    # Launch 6 boot requests concurrently
    for _ in range(6):
        worker = MThread(vm_helper.boot_vm, 'threading_vm', flavor=flav_id)
        worker.start_thread(240)
        workers.append(worker)
    for worker in workers:
        worker.wait_for_thread_end()
    for worker in workers:
        ResourceCleanup.add(resource_type='vm',
                            resource_id=worker.get_output()[1])
    end_1 = time()

    start_2 = time()
    # Serial boots for comparison
    for _ in range(2):
        vm_id = vm_helper.boot_vm('loop_vm', flav_id)[1]
        ResourceCleanup.add(resource_type='vm', resource_id=vm_id)
    end_2 = time()

    LOG.info("Time results:\n"
             "Multithreading: {}\n"
             "Single loop: {}".format(end_1 - start_1, end_2 - start_2))
def create_flavor_for_pci(self, vcpus=4, ram=1024):
    """
    Create a dedicated-cpu flavor carrying the pci alias and irq affinity
    extra specs configured on this instance.

    Args:
        vcpus (int): vcpus for the flavor
        ram (int): ram in MiB for the flavor

    Returns (str): id of the created flavor
    """
    flavor_id = nova_helper.create_flavor(name='dedicated_pci_extras',
                                          vcpus=vcpus, ram=ram,
                                          cleanup='function')[1]
    pci_alias_spec = None
    if self.pci_alias:
        pci_alias_spec = '{}:{}'.format(self.pci_alias_names[0],
                                        self.pci_alias)

    LOG.tc_step('Set extra-specs to the flavor {}'.format(flavor_id))
    candidate_specs = {
        FlavorSpec.CPU_POLICY: 'dedicated',
        # FlavorSpec.PCI_NUMA_AFFINITY: self.pci_numa_affinity,  # LP1854516
        FlavorSpec.PCI_PASSTHROUGH_ALIAS: pci_alias_spec,
        FlavorSpec.PCI_IRQ_AFFINITY_MASK: self.pci_irq_affinity_mask,
    }
    # Drop unset specs and stringify the rest before applying
    extra_specs = {spec: str(val) for spec, val in candidate_specs.items()
                   if val is not None}
    if extra_specs:
        nova_helper.set_flavor(flavor_id, **extra_specs)

    return flavor_id
def test_something_avoid(modify_system_backing):
    """
    Test to AVOID! Do NOT parametrize module/class level fixture unless
    you are absolutely sure about the impact and intend to do so.

    Note that when a module level fixture is parametrized, both the
    setups AND teardowns will be run multiple times.

    Args:
        modify_system_backing:

    Setups:
        - Lock host, modify host storage backing to given backing,
          unlock host (module)

    Test Steps:
        - Create a flavor with specified storage backing
        - Boot vm from above flavor

    Teardown:
        - Delete created vm, volume, flavor
        - Lock host, modify host storage backing to local_image,
          unlock host (module)
    """
    storage_backing = modify_system_backing

    LOG.tc_step("Create a flavor with specified storage backing")
    flavor_id = nova_helper.create_flavor(
        name='test_avoid_flv', storage_backing=storage_backing)[1]
    ResourceCleanup.add(resource_type='flavor', resource_id=flavor_id)

    LOG.tc_step("Boot vm from above flavor")
    vm = vm_helper.boot_vm(name='test_avoid_vm', flavor=flavor_id)[1]
    ResourceCleanup.add(resource_type='vm', resource_id=vm)
def base_setup(self):
    """
    Class-scope setup: create a dedicated-cpu flavor and boot a base vm
    attached to the mgmt, tenant and internal networks.

    Returns (tuple):
        (base_vm, flavor_id, mgmt_net_id, tenant_net_id, internal_net_id)
    """
    flavor_id = nova_helper.create_flavor(name='dedicated')[1]
    ResourceCleanup.add('flavor', flavor_id, scope='class')
    nova_helper.set_flavor(flavor=flavor_id,
                           **{FlavorSpec.CPU_POLICY: 'dedicated'})

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    nics = [{'net-id': net_id}
            for net_id in (mgmt_net_id, tenant_net_id, internal_net_id)]

    LOG.fixture_step(
        "(class) Boot a base vm with following nics: {}".format(nics))
    base_vm = vm_helper.boot_vm(name='multiports_base', flavor=flavor_id,
                                nics=nics, cleanup='class',
                                reuse_vol=False)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(base_vm)
    # Sanity: base vm can reach itself over the data network
    vm_helper.ping_vms_from_vm(base_vm, base_vm, net_types='data')

    return base_vm, flavor_id, mgmt_net_id, tenant_net_id, internal_net_id
def create_flavors(self):
    """
    Create one flavor per vm under test and record name/id pairs in
    self.vms_info keyed by vm serial number.
    """
    LOG.tc_step('Create flavors')

    for sn in range(NUM_VM):
        name = 'pve_flavor_{}'.format(sn)
        options = dict(
            name=name,
            vcpus=self.vcpus[sn],
            ram=self.mem[sn],
            root_disk=self.root_disk[sn],
            is_public=True,
            storage_backing=self.storage_backing,
        )
        # Swap disk is optional and only applied when configured
        if self.swap_disk:
            options['swap'] = self.swap_disk[sn]

        flavor_id = nova_helper.create_flavor(**options)[1]
        ResourceCleanup.add('flavor', flavor_id, scope='function')
        self.vms_info.update(
            {sn: {'flavor_name': name, 'flavor_id': flavor_id}})
    # TODO create volume
    LOG.info('OK, flavors created:\n{}\n'.format(
        [vm['flavor_id'] for vm in self.vms_info.values()]))
def create_flavor(vm_type, flavor_type=None, name=core_flavor_name):
    """
    Create a flavor with vtpm / auto-recovery extra specs derived from
    vm_type (or flavor_type when given) and register it in g_flavors.

    Args:
        vm_type (str): tags such as 'non_vtpm', 'autorc', 'non_autorc'
        flavor_type (str|None): overrides vm_type for tag lookup and for
            the g_flavors key when provided
        name (str): base flavor name; suffixes appended per tags

    Returns (str): id of the created flavor
    """
    global g_flavors

    def _tagged(tag):
        # Tag applies when present in vm_type or in a given flavor_type
        return tag in vm_type or (flavor_type and tag in flavor_type)

    extra_specs = {}
    if _tagged('non_vtpm'):
        name += '_nonvtpm'
        extra_specs['sw:wrs:vtpm'] = 'false'
    else:
        extra_specs['sw:wrs:vtpm'] = 'true'

    # NOTE: order matters — 'non_autorc' contains 'autorc', so it is
    # checked first
    if _tagged('non_autorc'):
        name += '_nonrc'
        extra_specs['sw:wrs:auto_recovery'] = 'false'
    elif _tagged('autorc'):
        name += '_autorc'
        extra_specs['sw:wrs:auto_recovery'] = 'true'

    flavor_id = nova_helper.create_flavor(name=name)[1]
    nova_helper.set_flavor(flavor_id, **extra_specs)

    key = flavor_type if flavor_type is not None else vm_type
    g_flavors[key] = flavor_id

    return flavor_id
def _test_check_vm_disk_on_compute(storage, hosts_per_backing):
    """
    Tests that existence of volumes are properly reported for lvm-backed vms.

    Skip:
    - Skip if no lvm-configured compute nodes available

    Test steps:
    - Create a flavor for a lvm-backed vms and boot vm out of that flavor
    - SSH onto the node hosting the VM and do the following:
      - Run ps aux and confirm that there is a qemu process
      - Run sudo lvs and confirm the existence of a thin pool
      - Run sudo lvs and confirm the existence of a volume for the vm
    - Ensure that the "free" space shown for the hypervisor (obtained by
      running "nova hypervisor-show <compute node>" and then checking the
      "free_disk_gb" field) reflects the space available within the thin pool
    - Delete the instance and ensure that space is returned to the hypervisor

    Test Teardown:
    - Delete created VM if not already done
    """
    # Skip unless at least one host uses the requested storage backing
    hosts_with_backing = hosts_per_backing.get(storage, [])
    if not hosts_with_backing:
        skip(SkipStorageBacking.NO_HOST_WITH_BACKING.format(storage))

    LOG.tc_step("Create flavor and boot vm")
    flavor = nova_helper.create_flavor(storage_backing=storage)[1]
    ResourceCleanup.add('flavor', flavor, scope='function')
    vm = vm_helper.boot_vm(source='image', flavor=flavor,
                           cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm)
    vm_host = vm_helper.get_vm_host(vm)

    with host_helper.ssh_to_host(vm_host) as compute_ssh:
        LOG.tc_step("Look for qemu process")
        compute_ssh.exec_sudo_cmd(cmd="lvs --units g")
        assert check_for_qemu_process(compute_ssh), "qemu process not found when calling ps"

        LOG.tc_step("Look for pool information")
        # Thin-pool size before accounting for the vm's own volume
        thin_pool_size = get_initial_pool_space(compute_ssh, vm)

        # The vm's root volume is named "<vm_id>_disk" in lvs output
        vm_vol_name = vm + '_disk'
        raw_vm_volume_output = \
            compute_ssh.exec_sudo_cmd(cmd="lvs --units g --noheadings -o lv_size -S lv_name={}".format(vm_vol_name))[1]
        assert raw_vm_volume_output, "created vm volume not found"
        # lvs prints sizes like "<2.00g"; strip the markers to parse
        vm_volume_size = float(raw_vm_volume_output.strip('<g'))

        LOG.tc_step("Calculate compute free disk space and ensure that it reflects thin pool")
        expected_space_left = int(thin_pool_size - vm_volume_size)
        free_disk_space = get_compute_free_disk_gb(vm_host)
        # Allow 1G slack for rounding between lvs and hypervisor-show
        assert expected_space_left - 1 <= free_disk_space <= expected_space_left + 1, \
            'Hypervisor-show does not reflect space within thin pool'

        LOG.tc_step("Calculate free space following vm deletion (ensure volume space is returned)")
        vm_helper.delete_vms(vm)
        free_disk_space = get_compute_free_disk_gb(vm_host)
        assert int(thin_pool_size) == free_disk_space, \
            'Space is not properly returned to the hypervisor or hypervisor info does not properly reflect it'
def test_flavor_setting_numa_negative(vcpus, vswitch_affinity, numa_nodes,
                                      numa0, numa0_cpus, numa0_mem, numa1,
                                      numa1_cpus, numa1_mem, expt_err):
    """
    Verify that invalid combinations of numa-related extra specs are
    rejected when applied to a flavor.

    Test Steps:
        - Create a 1024M-ram flavor with the given vcpus
        - Attempt to set the (invalid) numa/vswitch extra specs
        - Verify the request is rejected with the expected error message
    """
    LOG.tc_step("Create a 1024ram flavor with {} vcpus".format(vcpus))
    name = 'vswitch_affinity_{}_1G_{}cpu'.format(vswitch_affinity, vcpus)
    flv_id = nova_helper.create_flavor(name=name, vcpus=vcpus, ram=1024)[1]
    ResourceCleanup.add('flavor', flv_id)

    specs = {
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.NUMA_NODES: numa_nodes,
        FlavorSpec.VSWITCH_NUMA_AFFINITY: vswitch_affinity,
    }
    optional_specs = {
        FlavorSpec.NUMA_0: numa0,
        FlavorSpec.NUMA0_CPUS: numa0_cpus,
        FlavorSpec.NUMA0_MEM: numa0_mem,
        FlavorSpec.NUMA_1: numa1,
        FlavorSpec.NUMA1_CPUS: numa1_cpus,
        FlavorSpec.NUMA1_MEM: numa1_mem,
    }
    # Only include the numa specs the test case actually parametrized
    specs.update({spec: val for spec, val in optional_specs.items()
                  if val is not None})

    LOG.tc_step(
        "Attempt to set following extra spec to flavor {} and ensure it's rejected: {}"
        .format(flv_id, specs))
    code, output = nova_helper.set_flavor(flv_id, fail_ok=True, **specs)
    assert 1 == code, "Invalid extra spec is not rejected. Details: {}".format(
        output)
    # expt_err is the *name* of an error-message constant; resolve it here
    assert eval(expt_err) in output, "Expected error message is not found"
def _flavors(hosts_pci_device_info):
    """
    Creates all flavors required for this test module
    """
    # All flavors share the pci alias of the first device on the first host
    pci_alias = list(hosts_pci_device_info.values())[0][0]['pci_alias']
    # name: [vcpus, ram, root_disk, number of QAT VFs]
    flavor_parms = {'flavor_qat_vf_1': [2, 1024, 2, 1],
                    'flavor_resize_qat_vf_1': [4, 2048, 2, 1],
                    'flavor_qat_vf_4': [2, 1024, 2, 4],
                    'flavor_resize_qat_vf_4': [2, 2048, 2, 4],
                    'flavor_qat_vf_32': [2, 1024, 2, 32],
                    'flavor_qat_vf_33': [2, 1024, 2, 33],
                    'flavor_none': [1, 1024, 2, 0],
                    'flavor_resize_none': [2, 2048, 2, 0],
                    'flavor_resize_qat_vf_32': [4, 2048, 2, 32],
                    }

    flavors = {}
    for name, (vcpus, ram, root_disk, vf) in flavor_parms.items():
        LOG.fixture_step("Create a flavor with {} Coletro Creek crypto VF....".format(vf))
        flavor_id = nova_helper.create_flavor(name=name, vcpus=vcpus,
                                              ram=ram, root_disk=root_disk)[1]
        ResourceCleanup.add('flavor', flavor_id, scope='module')
        if vf > 0:
            # Only VF-carrying flavors get the passthrough alias spec
            extra_spec = {
                FlavorSpec.PCI_PASSTHROUGH_ALIAS: '{}:{}'.format(pci_alias, vf),
                # FlavorSpec.NUMA_NODES: '2',  # feature deprecated. May need to update test case as well.
                FlavorSpec.CPU_POLICY: 'dedicated'}
            nova_helper.set_flavor(flavor_id, **extra_spec)
        flavors[name] = flavor_id

    return flavors
def test_boot_windows_guest():
    """
    Boot a windows guest to assist for manual testing on windows guest
    """
    # Change the following parameters to change the vm type.
    guest = 'win_2012'  # such as tis-centos-guest
    storage = 'local_image'  # local_lvm, local_image, or remote
    boot_source = 'image'  # volume or image

    LOG.tc_step("Get/Create {} glance image".format(guest))
    glance_helper.get_guest_image(guest_os=guest)

    LOG.tc_step("Create flavor with {} storage backing".format(storage))
    flv_id = nova_helper.create_flavor(name='{}-{}'.format(storage, guest),
                                       vcpus=4, ram=8192,
                                       storage_backing=storage,
                                       guest_os=guest)[1]
    nova_helper.set_flavor(flv_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    LOG.tc_step("Boot {} vm".format(guest))
    vm_id = vm_helper.boot_vm(name='{}-{}'.format(guest, storage),
                              flavor=flv_id, guest_os=guest,
                              source=boot_source)[1]

    LOG.tc_step("Ping vm and ssh to it")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        # Simple command just to prove the ssh channel works
        code, output = vm_ssh.exec_cmd('pwd', fail_ok=False)
        LOG.info(output)

    LOG.info(
        "{} is successfully booted from {} with {} storage backing".format(
            guest, boot_source, storage))
def prepare_resource(add_admin_role_module):
    """
    Module fixture: pick a random up hypervisor, create a 1G-ram flavor
    and a volume (both cleaned up at module scope).

    Returns (tuple): (hypervisor, flavor_id, volume_id)
    """
    target_host = random.choice(host_helper.get_up_hypervisors())
    flavor_id = nova_helper.create_flavor(name='flavor-1g', ram=1024,
                                          cleanup='module')[1]
    volume_id = cinder_helper.create_volume('vol-mem_page_size',
                                            cleanup='module')[1]
    return target_host, flavor_id, volume_id
def vif_model_check(request):
    """
    Parametrized fixture: prepare everything needed to test vms with the
    requested vif model (e.g. pci-sriov / pci-passthrough).

    Skips when no pci network supporting the vif model is available.

    Returns (tuple):
        (vif_model, base_vm, flavor_id, nics_to_test, pcipt_seg_ids,
         pnet_name, extra_pcipt_net)
    """
    vif_model = request.param

    LOG.fixture_step(
        "Get a network that supports {} to boot vm".format(vif_model))
    pci_net = network_helper.get_pci_vm_network(pci_type=vif_model,
                                                net_name='internal0-net')
    if not pci_net:
        skip(SkipHostIf.PCI_IF_UNAVAIL)

    extra_pcipt_net_name = extra_pcipt_net = None
    if not isinstance(pci_net, str):
        # Helper returned a (net, extra_pcipt_net_name) pair
        pci_net, extra_pcipt_net_name = pci_net
    LOG.info("PCI network selected to boot vm: {}".format(pci_net))

    LOG.fixture_step("Create a flavor with dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated', ram=2048,
                                          cleanup='module')[1]
    extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    LOG.fixture_step("Boot a base vm with above flavor and virtio nics")
    mgmt_net_id = network_helper.get_mgmt_net_id()
    pci_net_id, seg_id, pnet_name = network_helper.get_network_values(
        network=pci_net,
        fields=('id', 'provider:segmentation_id',
                'provider:physical_network'))

    # nics: virtio nics for the base vm; nics_to_test: same networks but
    # with the vif model under test on the pci network(s)
    nics = [{'net-id': mgmt_net_id}, {'net-id': pci_net_id}]
    nics_to_test = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': pci_net_id,
        'vif-model': vif_model
    }]
    # Segmentation ids are only needed for pci-passthrough vlan tagging
    pcipt_seg_ids = {}
    if vif_model == 'pci-passthrough':
        pcipt_seg_ids[pci_net] = seg_id
        if extra_pcipt_net_name:
            extra_pcipt_net, seg_id = network_helper.get_network_values(
                network=extra_pcipt_net_name,
                fields=('id', 'provider:segmentation_id'))
            nics.append({'net-id': extra_pcipt_net})
            nics_to_test.append({
                'net-id': extra_pcipt_net,
                'vif-model': vif_model
            })
            pcipt_seg_ids[extra_pcipt_net_name] = seg_id

    base_vm = vm_helper.boot_vm(flavor=flavor_id, nics=nics,
                                cleanup='module')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(base_vm)
    vm_helper.ping_vms_from_vm(base_vm, base_vm,
                               net_types=['mgmt', 'internal'])

    return vif_model, base_vm, flavor_id, nics_to_test, pcipt_seg_ids, \
        pnet_name, extra_pcipt_net
def flavor_id_module():
    """
    Module fixture: create a basic flavor at module setup and register it
    for deletion at module teardown.

    Returns (str): id of the created flavor
    """
    flavor_id = nova_helper.create_flavor()[1]
    ResourceCleanup.add('flavor', resource_id=flavor_id, scope='module')
    return flavor_id
def _create_flavor_vcpu_model(vcpu_model, root_disk_size=None):
    """
    Create a flavor and, when a vcpu model is given, tag it with the
    corresponding vcpu-model extra spec.

    Args:
        vcpu_model (str|None): value for the vcpu-model extra spec
        root_disk_size (int|None): root disk size for the flavor

    Returns (str): id of the created flavor
    """
    flavor_name = 'vcpu_model_{}'.format(vcpu_model)
    flv_id = nova_helper.create_flavor(name=flavor_name,
                                       root_disk=root_disk_size)[1]
    ResourceCleanup.add('flavor', flv_id)
    if vcpu_model:
        nova_helper.set_flavor(flavor=flv_id,
                               **{FlavorSpec.VCPU_MODEL: vcpu_model})
    return flv_id
def flavor_2g(self, add_1g_and_4k_pages):
    """
    Class fixture: create a 2G-ram flavor on the storage backing set up
    by add_1g_and_4k_pages.

    Returns (tuple): (flavor_id, hosts, storage_backing)
    """
    hosts, storage_backing = add_1g_and_4k_pages
    LOG.fixture_step("Create a 2G memory flavor to be used by mempage "
                     "testcases")
    flavor_id = nova_helper.create_flavor(name='flavor-2g', ram=2048,
                                          storage_backing=storage_backing,
                                          cleanup='class')[1]
    return flavor_id, hosts, storage_backing
def test_lock_unlock_secure_boot_vm():
    """
    This is to test host lock with secure boot vm.

    Steps:
        - Create uefi glance images and volumes for both guests
        - Boot a vm from the two volumes as block devices
        - Verify secure boot inside the vm
        - Lock/unlock the hosting compute and re-verify secure boot

    :return:
    """
    guests_os = ['trusty_uefi', 'uefi_shell']
    # Fix: renamed from 'disk_format' — the original reused the same name
    # as the loop variable below, rebinding the list being zipped over
    disk_formats = ['qcow2', 'raw']
    image_ids = []
    volume_ids = []
    for guest_os, disk_format in zip(guests_os, disk_formats):
        image_ids.append(
            create_image_with_metadata(
                guest_os=guest_os,
                property_key=ImageMetadata.FIRMWARE_TYPE,
                values=['uefi'],
                disk_format=disk_format,
                container_format='bare'))

    # create a flavor
    flavor_id = nova_helper.create_flavor(vcpus=2, ram=1024, root_disk=5)[1]
    ResourceCleanup.add('flavor', flavor_id)

    # boot a vm using the above image
    for image_id in image_ids:
        volume_ids.append(
            cinder_helper.create_volume(source_id=image_id[0], size=5,
                                        cleanup='function')[1])

    # Second volume is the boot device (bootindex 0)
    block_device_dic = [{
        'id': volume_ids[1],
        'source': 'volume',
        'bootindex': 0
    }, {
        'id': volume_ids[0],
        'source': 'volume',
        'bootindex': 1
    }]

    vm_id = vm_helper.boot_vm(name='sec-boot-vm',
                              source='block_device',
                              flavor=flavor_id,
                              block_device=block_device_dic,
                              cleanup='function',
                              guest_os=guests_os[0])[1]

    _check_secure_boot_on_vm(vm_id=vm_id)

    # Lock the compute node with the secure Vms
    compute_host = vm_helper.get_vm_host(vm_id=vm_id)
    host_helper.lock_host(compute_host, timeout=800)
    if not system_helper.is_aio_simplex():
        # Multi-node: vm should have moved hosts; secure boot must hold
        _check_secure_boot_on_vm(vm_id=vm_id)
    host_helper.unlock_host(compute_host, timeout=800)

    if system_helper.is_aio_simplex():
        # Simplex: vm only recovers once the single host is back up
        _check_secure_boot_on_vm(vm_id=vm_id)
def test_boot_ge_edge_uefi():
    """
    Launch a GE_EDGE guest vm that boots via UEFI.
    """
    guest = 'ge_edge'
    LOG.tc_step("Get ge_edge guest image from test server and create glance image with uefi property")
    glance_helper.get_guest_image(guest_os=guest, rm_image=True)

    LOG.tc_step("Create a flavor for ge_edge vm")
    flavor = nova_helper.create_flavor(guest_os=guest)[1]

    LOG.tc_step("Launch a GE_EDGE vm with UEFI boot")
    vm_helper.boot_vm(name='ge_edge_uefi', flavor=flavor, guest_os=guest)
def test_create_flavor(name, swap, ephemeral, storage, cpu_policy):
    """
    Create a flavor with the given swap/ephemeral disks and apply
    storage-aggregate and cpu-policy extra specs to it.
    """
    flavor_id = nova_helper.create_flavor(name=name, swap=swap,
                                          ephemeral=ephemeral)[1]
    LOG.info("Flavor id: {}".format(flavor_id))
    extra_specs = {'aggregate_instance_extra_specs:storage': storage,
                   'hw:cpu_policy': cpu_policy}
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)
def flavor_memconf(self, add_1g_and_4k_pages):
    """
    Class fixture: create a VM_MEM_GIB-sized memory flavor on the storage
    backing set up by add_1g_and_4k_pages.

    Returns (tuple): (flavor_id, hosts, storage_backing)
    """
    hosts, storage_backing = add_1g_and_4k_pages
    LOG.fixture_step("Create a {}G memory flavor to be used by mempage "
                     "testcases".format(VM_MEM_GIB))
    flavor_id = nova_helper.create_flavor(
        name='flavor-mem{}g'.format(VM_MEM_GIB),
        ram=1024 * VM_MEM_GIB,
        storage_backing=storage_backing,
        cleanup='class')[1]
    return flavor_id, hosts, storage_backing
def flavors():
    """
    Create two 2-vcpu dedicated-cpu flavors pinned to numa node 0 and 1
    respectively.

    Returns (dict): {'numa0': flavor_id, 'numa1': flavor_id}
    """
    flavor_map = {}
    for numa in ('0', '1'):
        key = 'numa{}'.format(numa)
        numa_flv = nova_helper.create_flavor(name=key, vcpus=2)[1]
        # ResourceCleanup.add('flavor', numa_flv, scope='module')
        flavor_map[key] = numa_flv
        nova_helper.set_flavor(numa_flv,
                               **{FlavorSpec.CPU_POLICY: 'dedicated',
                                  FlavorSpec.NUMA_0: numa})
    return flavor_map
def test_force_lock_with_non_mig_vms(add_host_to_zone):
    """
    Test force lock host with non-migrate-able vms on it

    Prerequisites:
        - Minimum of two up hypervisors
    Test Setups:
        - Add admin role to primary tenant
        - Create cgcsauto aggregate
        - Add host_under_test to cgcsauto aggregate
        - Create flavor for vms_to_test with storage_backing support by
          host_under_test
        - Create vms_to_test on host_under_test that can be live migrated
    Test Steps:
        - Force lock target host
        - Verify force lock returns 0
        - Verify VMs cannot find a host to boot and are in error state
        - Unlock locked target host
        - Verify VMs are active on host once it is up and available
        - Verify VMs can be pinged
    Test Teardown:
        - Remove admin role from primary tenant
        - Delete created vms
        - Remove host_under_test from cgcsauto aggregate
    """
    storage_backing, host_under_test = add_host_to_zone

    # Create flavor with storage_backing the host_under_test supports.
    # Fix: register the flavor for function-scope cleanup — it was
    # previously leaked (never deleted at teardown).
    flavor_id = nova_helper.create_flavor(storage_backing=storage_backing,
                                          cleanup='function')[1]

    # Boot VMs on the host using the above flavor.
    LOG.tc_step("Boot VM on {}".format(host_under_test))
    vm_id = vm_helper.boot_vm(vm_host=host_under_test, flavor=flavor_id,
                              avail_zone='cgcsauto', cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    # Force lock host that VMs are booted on.
    LOG.tc_step("Force lock {}".format(host_under_test))
    HostsToRecover.add(host_under_test)
    lock_code, lock_output = host_helper.lock_host(host_under_test,
                                                   force=True)
    assert lock_code == 0, "Failed to lock {}. Details: {}".format(
        host_under_test, lock_output)

    # With its only eligible host locked, the vm must go to ERROR
    vm_helper.wait_for_vm_values(vm_id, fail_ok=False, **{'status': 'ERROR'})

    host_helper.unlock_host(host_under_test)

    vm_helper.wait_for_vm_values(vm_id, timeout=300, fail_ok=False,
                                 **{'status': 'ACTIVE'})
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id,
                                               timeout=VMTimeout.DHCP_RETRY)
def _create_flavor(flavor_info, storage_backing):
    """
    Create a flavor from a (root_disk, ephemeral, swap) spec sequence.

    Args:
        flavor_info: sequence of [root_disk, ephemeral, swap] sizes
        storage_backing (str): storage backing for the flavor

    Returns (str): id of the created flavor
    """
    root_disk, ephemeral, swap = flavor_info[0], flavor_info[1], \
        flavor_info[2]

    flavor_id = nova_helper.create_flavor(ephemeral=ephemeral, swap=swap,
                                          root_disk=root_disk,
                                          storage_backing=storage_backing)[1]
    ResourceCleanup.add('flavor', flavor_id)
    return flavor_id
def flavor_unset(request):
    """
    Create a 2-vcpu flavor named 'test_unset_numa'; the flavor is deleted
    at test teardown.
    """
    flavor_id = nova_helper.create_flavor('test_unset_numa', vcpus=2)[1]
    request.addfinalizer(lambda: nova_helper.delete_flavors(flavor_id))
    return flavor_id
def flavor_(request):
    """
    Create a basic flavor as test setup; the flavor is deleted at test
    teardown via a finalizer.
    """
    flavor_id = nova_helper.create_flavor()[1]
    request.addfinalizer(lambda: nova_helper.delete_flavors(flavor_id))
    return flavor_id
def test_migration_auto_converge(no_simplex):
    """
    Auto converge a VM with stress-ng running

    Test Steps:
        - Create flavor
        - Create a heat stack (launch a vm with stress-ng)
        - Perform live-migration and verify connectivity

    Test Teardown:
        - Delete stacks,vm, flavors created
    """
    LOG.tc_step("Create a flavor with 2 vcpus")
    flavor_id = nova_helper.create_flavor(vcpus=2, ram=1024, root_disk=3)[1]
    ResourceCleanup.add('flavor', flavor_id)
    # add migration timout
    nova_helper.set_flavor(flavor=flavor_id,
                           **{FlavorSpec.LIVE_MIG_TIME_OUT: 300})

    LOG.tc_step("Get the heat file name to use")
    heat_template = _get_stress_ng_heat()

    stack_name = vm_name = 'stress_ng'
    LOG.tc_step("Creating heat stack")
    code, msg = heat_helper.create_stack(
        stack_name=stack_name,
        template=heat_template,
        parameters={'flavor': flavor_id, 'name': vm_name},
        cleanup='function')
    assert code == 0, "Failed to create heat stack"

    LOG.info("Verifying server creation via heat")
    vm_id = vm_helper.get_vm_id_from_name(vm_name='stress_ng', strict=False)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        LOG.tc_step("Check for Stress-ng inside vm")
        assert 0 == wait_for_stress_ng(vm_ssh), " Stress-ng is not running"

    for vm_actions in [['live_migrate']]:
        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm_id, vm_actions))
        for action in vm_actions:
            vm_helper.perform_action_on_vm(vm_id, action=action)

    LOG.tc_step("Ping vm from natbox")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
def flavor_0_node(request):
    """
    Create a basic flavor named 'no_numa_node' with 1 vcpu and no
    numa-node extra spec set; the flavor is deleted at test teardown.
    """
    flavor = nova_helper.create_flavor('no_numa_node', vcpus=1)[1]

    def delete():
        nova_helper.delete_flavors(flavor)

    request.addfinalizer(delete)

    return flavor
def test_vcpu_model_and_thread_policy(vcpu_model, thread_policy,
                                      cpu_models_supported):
    """
    Launch vm with vcpu model spec and cpu thread policy both set

    Args:
        vcpu_model (str):
        thread_policy (str):
        cpu_models_supported (tuple): fixture

    Test Steps:
        - create flavor with vcpu model and cpu thread extra specs set
        - boot vm from volume with above flavor
        - if no hyperthreaded host, check vm failed to schedule
        - otherwise check vcpu model and cpu thread policy both set as
          expected
    """
    cpu_models_multi_host, all_cpu_models_supported = cpu_models_supported
    if vcpu_model != 'Passthrough' and \
            vcpu_model not in all_cpu_models_supported:
        skip("{} is not supported by any hypervisor".format(vcpu_model))

    name = '{}_{}'.format(vcpu_model, thread_policy)
    flv_id = nova_helper.create_flavor(name=name, vcpus=2)[1]
    ResourceCleanup.add('flavor', flv_id)
    nova_helper.set_flavor(flavor=flv_id,
                           **{FlavorSpec.VCPU_MODEL: vcpu_model,
                              FlavorSpec.CPU_POLICY: 'dedicated',
                              FlavorSpec.CPU_THREAD_POLICY: thread_policy})

    code, vm, msg = vm_helper.boot_vm(name=name, flavor=flv_id,
                                      fail_ok=True, cleanup='function')
    ht_hosts = host_helper.get_hypersvisors_with_config(hyperthreaded=True,
                                                        up_only=True)
    if thread_policy == 'require' and not ht_hosts:
        # No hyperthreaded host available: scheduling must fail
        assert 1 == code
    else:
        assert 0 == code, "VM is not launched successfully"
        check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model)
        vm_host = vm_helper.get_vm_host(vm)
        check_helper.check_topology_of_vm(vm_id=vm, vcpus=2,
                                          cpu_pol='dedicated',
                                          cpu_thr_pol=thread_policy,
                                          numa_num=1, vm_host=vm_host)