def _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol, vcpus,
                        vm_type):
    LOG.tc_step(
        "Create a flavor with {} vcpus, {}G ephemeral disk, {}M swap "
        "disk".format(vcpus, ephemeral, swap))
    flavor_id = nova_helper.create_flavor(
        name='migration_test', ephemeral=ephemeral, swap=swap, vcpus=vcpus,
        storage_backing=storage_backing, cleanup='function')[1]

    if cpu_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: cpu_pol}
        LOG.tc_step("Add following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor=flavor_id, **specs)

    boot_source = 'volume' if vm_type == 'volume' else 'image'
    LOG.tc_step("Boot a vm from {}".format(boot_source))
    vm_id = vm_helper.boot_vm('migration_test', flavor=flavor_id,
                              source=boot_source, reuse_vol=False,
                              cleanup='function')[1]

    if vm_type == 'image_with_vol':
        LOG.tc_step("Attach volume to vm")
        vm_helper.attach_vol_to_vm(vm_id=vm_id, mount=False)

    return vm_id
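

# Illustrative only: a minimal sketch of how a parametrized migration test
# might consume _boot_vm_under_test above. The test name and the parameter
# grid are hypothetical; it assumes `from pytest import mark` as used
# elsewhere in this repo, and reuses the vm_helper calls from this section.
@mark.parametrize(('storage_backing', 'ephemeral', 'swap', 'cpu_pol',
                   'vcpus', 'vm_type'), [
    ('local_image', 0, 0, None, 1, 'volume'),
    ('local_image', 1, 512, 'dedicated', 2, 'image'),
])
def test_live_migrate_vm_sketch(storage_backing, ephemeral, swap, cpu_pol,
                                vcpus, vm_type):
    # Boot the vm under test, then live migrate and re-check connectivity
    vm_id = _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol,
                                vcpus, vm_type)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    LOG.tc_step("Live migrate the vm and ensure it remains pingable")
    vm_helper.live_migrate_vm(vm_id=vm_id)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)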
def _boot_migrable_vms(storage_backing):
    """
    Create vms with specific storage backing that can be live migrated

    Args:
        storage_backing: 'local_image' or 'remote'

    Returns: (vms_info (list), flavors_created (list))
        vms_info: [(vm_id1, block_mig1), (vm_id2, block_mig2), ...]

    """
    vms_to_test = []
    flavors_created = []
    flavor_no_localdisk = nova_helper.create_flavor(
        ephemeral=0, swap=0, storage_backing=storage_backing)[1]
    flavors_created.append(flavor_no_localdisk)

    vm_1 = vm_helper.boot_vm(flavor=flavor_no_localdisk, source='volume')[1]
    block_mig_1 = False
    vms_to_test.append((vm_1, block_mig_1))

    LOG.info("Boot a VM from image if host storage backing is local_image "
             "or remote...")
    vm_2 = vm_helper.boot_vm(flavor=flavor_no_localdisk, source='image')[1]
    block_mig_2 = True
    vms_to_test.append((vm_2, block_mig_2))

    if storage_backing == 'remote':
        LOG.info("Boot a VM from volume with local disks if storage "
                 "backing is remote...")
        ephemeral_swap = random.choice([[0, 512], [1, 512], [1, 0]])
        flavor_with_localdisk = nova_helper.create_flavor(
            ephemeral=ephemeral_swap[0], swap=ephemeral_swap[1])[1]
        flavors_created.append(flavor_with_localdisk)
        vm_3 = vm_helper.boot_vm(flavor=flavor_with_localdisk,
                                 source='volume')[1]
        block_mig_3 = False
        vms_to_test.append((vm_3, block_mig_3))

        LOG.info("Boot a VM from image with volume attached if storage "
                 "backing is remote...")
        vm_4 = vm_helper.boot_vm(flavor=flavor_no_localdisk,
                                 source='image')[1]
        vm_helper.attach_vol_to_vm(vm_id=vm_4)
        block_mig_4 = False
        vms_to_test.append((vm_4, block_mig_4))

    return vms_to_test, flavors_created
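

# Illustrative only: a sketch of how the (vm_id, block_mig) tuples returned
# by _boot_migrable_vms might be consumed. It assumes vm_helper.live_migrate_vm
# accepts a block_migrate flag, as suggested by the block_mig values recorded
# above; the wrapper name is hypothetical.
def _live_migrate_booted_vms_sketch(storage_backing):
    vms_to_test, flavors_created = _boot_migrable_vms(storage_backing)
    for vm_id, block_mig in vms_to_test:
        # Volume-backed vms use normal live migration; image-backed vms
        # with no shared storage request block migration instead.
        LOG.info("Live migrate vm {} with block_migration={}".format(
            vm_id, block_mig))
        vm_helper.live_migrate_vm(vm_id=vm_id, block_migrate=block_mig)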
def _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol, vcpus,
                        vm_type):
    LOG.tc_step(
        "Create a flavor with {} vcpus, {}G ephemeral disk, {}M swap "
        "disk".format(vcpus, ephemeral, swap))
    flavor_id = nova_helper.create_flavor(name='flv_4k', ephemeral=ephemeral,
                                          swap=swap, vcpus=vcpus,
                                          storage_backing=storage_backing)[1]
    ResourceCleanup.add('flavor', flavor_id)

    specs = {FlavorSpec.MEM_PAGE_SIZE: 'small'}
    if cpu_pol is not None:
        specs[FlavorSpec.CPU_POLICY] = cpu_pol

    LOG.tc_step("Add following extra specs: {}".format(specs))
    nova_helper.set_flavor(flavor=flavor_id, **specs)

    boot_source = 'volume' if vm_type == 'volume' else 'image'
    LOG.tc_step("Boot a vm from {}".format(boot_source))
    vm_id = vm_helper.boot_vm('4k_vm', flavor=flavor_id, source=boot_source,
                              cleanup='function')[1]
    __check_pagesize(vm_id)

    if vm_type == 'image_with_vol':
        LOG.tc_step("Attach volume to vm")
        vm_helper.attach_vol_to_vm(vm_id=vm_id)

    # Make sure the VM is up and pingable from natbox
    LOG.tc_step("Ping VM {} from NatBox(external network)".format(vm_id))
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    return vm_id
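

# Illustrative only: __check_pagesize is called above but not defined in this
# section. A minimal sketch of what such a check might look like, assuming
# the flavor's FlavorSpec.MEM_PAGE_SIZE='small' extra spec should result in
# the instance being backed by 4K memory pages. The topology helper call and
# the 'pgsize' key are assumptions about helpers defined elsewhere.
def __check_pagesize(vm_id):
    LOG.tc_step("Check vm {} is booted with small (4K) memory "
                "pages".format(vm_id))
    # Hypothetical: fetch the page size the instance actually got and
    # compare against the requested small (4K) page size.
    topology = vm_helper.get_instance_topology(vm_id)
    actual = topology[0].get('pgsize')
    assert actual == 4, "Expected 4K memory pages, got {}K".format(actual)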
def test_evacuate_vms_with_inst_backing(self, hosts_per_backing,
                                        storage_backing):
    """
    Test evacuate vms with various vm storage configs and host instance
    backing configs

    Args:
        storage_backing: storage backing under test

    Skip conditions:
        - Less than two hosts configured with storage backing under test

    Setups:
        - Add admin role to primary tenant (module)

    Test Steps:
        - Create flv_rootdisk without ephemeral or swap disks, and set
          storage backing extra spec
        - Create flv_ephemswap with ephemeral AND swap disks, and set
          storage backing extra spec
        - Boot following vms on same host and wait for them to be pingable
          from NatBox:
            - Boot vm1 from volume with flavor flv_rootdisk
            - Boot vm2 from volume with flavor flv_ephemswap
            - Boot vm3 from image with flavor flv_rootdisk
            - Boot vm4 from image with flavor flv_rootdisk, and attach a
              volume to it
            - Boot vm5 from image with flavor flv_ephemswap
        - sudo reboot -f on vms host
        - Ensure evacuation for all 5 vms is successful (vm host changed,
          active state, pingable from NatBox)

    Teardown:
        - Delete created vms, volumes, flavors
        - Remove admin role from primary tenant (module)

    """
    hosts = hosts_per_backing.get(storage_backing, [])
    if len(hosts) < 2:
        skip(SkipStorageBacking.LESS_THAN_TWO_HOSTS_WITH_BACKING.format(
            storage_backing))

    target_host = hosts[0]

    LOG.tc_step("Create a flavor without ephemeral or swap disks")
    flavor_1 = nova_helper.create_flavor(
        'flv_rootdisk', storage_backing=storage_backing)[1]
    ResourceCleanup.add('flavor', flavor_1, scope='function')

    LOG.tc_step("Create another flavor with ephemeral and swap disks")
    flavor_2 = nova_helper.create_flavor(
        'flv_ephemswap', ephemeral=1, swap=512,
        storage_backing=storage_backing)[1]
    ResourceCleanup.add('flavor', flavor_2, scope='function')

    LOG.tc_step("Boot vm1 from volume with flavor flv_rootdisk and wait "
                "for it pingable from NatBox")
    vm1_name = "vol_root"
    vm1 = vm_helper.boot_vm(vm1_name, flavor=flavor_1, source='volume',
                            avail_zone='nova', vm_host=target_host,
                            cleanup='function')[1]

    vms_info = {vm1: {'ephemeral': 0,
                      'swap': 0,
                      'vm_type': 'volume',
                      'disks': vm_helper.get_vm_devices_via_virsh(vm1)}}
    vm_helper.wait_for_vm_pingable_from_natbox(vm1)

    LOG.tc_step("Boot vm2 from volume with flavor flv_ephemswap and wait "
                "for it pingable from NatBox")
    vm2_name = "vol_ephemswap"
    vm2 = vm_helper.boot_vm(vm2_name, flavor=flavor_2, source='volume',
                            avail_zone='nova', vm_host=target_host,
                            cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm2)
    vms_info[vm2] = {'ephemeral': 1,
                     'swap': 512,
                     'vm_type': 'volume',
                     'disks': vm_helper.get_vm_devices_via_virsh(vm2)}

    LOG.tc_step("Boot vm3 from image with flavor flv_rootdisk and wait for "
                "it pingable from NatBox")
    vm3_name = "image_root"
    vm3 = vm_helper.boot_vm(vm3_name, flavor=flavor_1, source='image',
                            avail_zone='nova', vm_host=target_host,
                            cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm3)
    vms_info[vm3] = {'ephemeral': 0,
                     'swap': 0,
                     'vm_type': 'image',
                     'disks': vm_helper.get_vm_devices_via_virsh(vm3)}

    LOG.tc_step("Boot vm4 from image with flavor flv_rootdisk, attach a "
                "volume to it and wait for it pingable from NatBox")
    vm4_name = 'image_root_attachvol'
    vm4 = vm_helper.boot_vm(vm4_name, flavor_1, source='image',
                            avail_zone='nova', vm_host=target_host,
                            cleanup='function')[1]

    vol = cinder_helper.create_volume(bootable=False)[1]
    ResourceCleanup.add('volume', vol, scope='function')
    vm_helper.attach_vol_to_vm(vm4, vol_id=vol, mount=False)

    vm_helper.wait_for_vm_pingable_from_natbox(vm4)
    vms_info[vm4] = {'ephemeral': 0,
                     'swap': 0,
                     'vm_type': 'image_with_vol',
                     'disks': vm_helper.get_vm_devices_via_virsh(vm4)}

    LOG.tc_step("Boot vm5 from image with flavor flv_ephemswap and wait "
                "for it pingable from NatBox")
    vm5_name = 'image_ephemswap'
    vm5 = vm_helper.boot_vm(vm5_name, flavor_2, source='image',
                            avail_zone='nova', vm_host=target_host,
                            cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm5)
    vms_info[vm5] = {'ephemeral': 1,
                     'swap': 512,
                     'vm_type': 'image',
                     'disks': vm_helper.get_vm_devices_via_virsh(vm5)}

    LOG.tc_step("Check all VMs are booted on {}".format(target_host))
    vms_on_host = vm_helper.get_vms_on_host(hostname=target_host)
    vms = [vm1, vm2, vm3, vm4, vm5]
    assert set(vms) <= set(vms_on_host), \
        "VMs booted on host: {}. Current vms on host: {}".format(
            vms, vms_on_host)

    for vm_ in vms:
        LOG.tc_step("Touch files under vm disks {}: "
                    "{}".format(vm_, vms_info[vm_]))
        file_paths, content = touch_files_under_vm_disks(vm_,
                                                         **vms_info[vm_])
        vms_info[vm_]['file_paths'] = file_paths
        vms_info[vm_]['content'] = content

    LOG.tc_step("Reboot target host {}".format(target_host))
    vm_helper.evacuate_vms(host=target_host, vms_to_check=vms,
                           ping_vms=True)

    LOG.tc_step("Check files after evacuation")
    for vm_ in vms:
        LOG.info("--------------------Check files for vm {}".format(vm_))
        check_helper.check_vm_files(vm_id=vm_, vm_action='evacuate',
                                    storage_backing=storage_backing,
                                    prev_host=target_host, **vms_info[vm_])
    vm_helper.ping_vms_from_natbox(vms)
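

# Illustrative only: touch_files_under_vm_disks and check_helper.check_vm_files
# are used above but defined elsewhere in the repo. A minimal sketch of the
# write side, using the ssh_to_vm_from_natbox context manager seen elsewhere
# in this codebase; the mount points and the sketch's name are hypothetical.
def _touch_files_under_vm_disks_sketch(vm_id, ephemeral=0, swap=0,
                                       vm_type='volume', disks=None):
    file_paths = []
    content = "I'm a test file"
    # Root disk is always checked; local ephemeral/swap disks are assumed
    # to be mounted under /mnt when the flavor has them.
    mounts = ['/']
    if ephemeral or swap:
        mounts.append('/mnt')
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        for i, mount in enumerate(mounts):
            path = '{}/test_file_{}'.format(mount.rstrip('/'), i)
            vm_ssh.exec_sudo_cmd('echo "{}" > {}'.format(content, path),
                                 fail_ok=False)
            # Flush to disk so content survives the forced host reboot
            vm_ssh.exec_sudo_cmd('sync')
            file_paths.append(path)
    return file_paths, content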
def vms_(self, add_admin_role_class):
    LOG.fixture_step("Create a flavor without ephemeral or swap disks")
    flavor_1 = nova_helper.create_flavor('flv_nolocaldisk')[1]
    ResourceCleanup.add('flavor', flavor_1, scope='class')

    LOG.fixture_step("Create a flavor with ephemeral and swap disks")
    flavor_2 = nova_helper.create_flavor('flv_localdisk', ephemeral=1,
                                         swap=512)[1]
    ResourceCleanup.add('flavor', flavor_2, scope='class')

    LOG.fixture_step("Boot vm1 from volume with flavor flv_nolocaldisk and "
                     "wait for it pingable from NatBox")
    vm1_name = "vol_nolocal"
    vm1 = vm_helper.boot_vm(vm1_name, flavor=flavor_1, source='volume',
                            cleanup='class')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm1)

    vm_host = vm_helper.get_vm_host(vm_id=vm1)

    LOG.fixture_step("Boot vm2 from volume with flavor flv_localdisk and "
                     "wait for it pingable from NatBox")
    vm2_name = "vol_local"
    vm2 = vm_helper.boot_vm(vm2_name, flavor=flavor_2, source='volume',
                            cleanup='class', avail_zone='nova',
                            vm_host=vm_host)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm2)

    LOG.fixture_step("Boot vm3 from image with flavor flv_nolocaldisk and "
                     "wait for it pingable from NatBox")
    vm3_name = "image_novol"
    vm3 = vm_helper.boot_vm(vm3_name, flavor=flavor_1, source='image',
                            cleanup='class', avail_zone='nova',
                            vm_host=vm_host)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm3)

    LOG.fixture_step("Boot vm4 from image with flavor flv_nolocaldisk and "
                     "wait for it pingable from NatBox")
    vm4_name = 'image_vol'
    vm4 = vm_helper.boot_vm(vm4_name, flavor_1, source='image',
                            cleanup='class', avail_zone='nova',
                            vm_host=vm_host)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm4)

    LOG.fixture_step("Attach volume to vm4 which was booted from image: "
                     "{}.".format(vm4))
    vm_helper.attach_vol_to_vm(vm4)

    return [vm1, vm2, vm3, vm4], vm_host
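

# Illustrative only: a sketch of a test method consuming the vms_ class
# fixture above, e.g. evacuating all four vms by rebooting their common
# host. The test name is hypothetical; evacuate_vms is invoked the same
# way as elsewhere in this section.
def test_evacuate_vms_sketch(self, vms_):
    vms, target_host = vms_
    LOG.tc_step("Reboot {} and ensure vms are evacuated".format(
        target_host))
    vm_helper.evacuate_vms(host=target_host, vms_to_check=vms,
                           ping_vms=True)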
def test_instantiate_a_vm_with_multiple_volumes_and_migrate():
    """
    Test a vm with multiple volumes through live migration, cold migration
    and evacuation:

    Test Setups:
        - get guest image_id
        - get or create 'small' flavor_id
        - get tenant and management network ids

    Test Steps:
        - create volume for boot and another extra size 8GB
        - boot vms from the created volume
        - Validate that VMs boot, and that no timeouts or error status occur
        - Verify VM status is ACTIVE
        - Attach the second volume to VM
        - Attempt live migrate VM
        - Login to VM and verify the filesystem is rw mode on both volumes
        - Attempt cold migrate VM
        - Login to VM and verify the filesystem is rw mode on both volumes
        - Reboot the compute host to initiate evacuation
        - Login to VM and verify the filesystem is rw mode on both volumes
        - Terminate VMs

    Skip conditions:
        - less than two computes
        - less than one storage

    """
    # skip("Currently not working. Centos image doesn't see both volumes")

    LOG.tc_step("Creating a volume size=8GB.....")
    vol_id_0 = cinder_helper.create_volume(size=8)[1]
    ResourceCleanup.add('volume', vol_id_0, scope='function')

    LOG.tc_step("Creating a second volume size=8GB.....")
    vol_id_1 = cinder_helper.create_volume(size=8, bootable=False)[1]
    LOG.tc_step("Volume id is: {}".format(vol_id_1))
    ResourceCleanup.add('volume', vol_id_1, scope='function')

    LOG.tc_step("Booting instance vm_0...")
    vm_id = vm_helper.boot_vm(name='vm_0', source='volume',
                              source_id=vol_id_0, cleanup='function')[1]
    time.sleep(5)

    LOG.tc_step("Verify VM can be pinged from NAT box...")
    rc, boot_time = check_vm_boot_time(vm_id)
    assert rc, "VM is not pingable after {} seconds ".format(boot_time)

    LOG.tc_step("Login to VM and to check filesystem is rw mode....")
    assert is_vm_filesystem_rw(vm_id), \
        'vol_0 rootfs filesystem is not RW as expected.'

    LOG.tc_step("Attempting to attach a second volume to VM...")
    vm_helper.attach_vol_to_vm(vm_id, vol_id_1)

    LOG.tc_step("Login to VM and to check filesystem is rw mode for both "
                "volumes....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda', 'vdb']), \
        'volumes rootfs filesystem is not RW as expected.'

    LOG.tc_step("Attempting live migrate VM...")
    vm_helper.live_migrate_vm(vm_id=vm_id)

    LOG.tc_step("Login to VM and to check filesystem is rw mode after live "
                "migration....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda', 'vdb']), \
        'After live migration rootfs filesystem is not RW'

    LOG.tc_step("Attempting cold migrate VM...")
    vm_helper.cold_migrate_vm(vm_id)

    LOG.tc_step("Login to VM and to check filesystem is rw mode after cold "
                "migration....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda', 'vdb']), \
        'After cold migration rootfs filesystem is not RW'

    LOG.tc_step("Testing VM evacuation.....")
    before_host_0 = vm_helper.get_vm_host(vm_id)

    LOG.tc_step("Rebooting compute {} to initiate vm evacuation "
                ".....".format(before_host_0))
    vm_helper.evacuate_vms(host=before_host_0, vms_to_check=vm_id,
                           ping_vms=True)

    LOG.tc_step("Login to VM and to check filesystem is rw mode after "
                "evacuation....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda', 'vdb']), \
        'After evacuation filesystem is not RW'
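

# Illustrative only: check_vm_boot_time and is_vm_filesystem_rw are used
# above but defined elsewhere. A minimal sketch of the rw check, assuming
# /proc/mounts inside the guest reports mount options per block device;
# the sketch's name and the device-to-mount mapping are simplifications.
def _is_vm_filesystem_rw_sketch(vm_id, rootfs='vda'):
    rootfs = [rootfs] if isinstance(rootfs, str) else rootfs
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        for dev in rootfs:
            # Mount options beginning with 'ro' indicate the filesystem was
            # remounted read-only, e.g. after an unclean migration.
            cmd = "grep /dev/{} /proc/mounts | awk '{{print $4}}'".format(
                dev)
            output = vm_ssh.exec_sudo_cmd(cmd, fail_ok=False)[1]
            if output.strip().startswith('ro'):
                return False
    return True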
def vms_with_upgrade():
    """
    Fixture to create various vms before upgrade

    Skip conditions:
        - Less than two hosts configured with storage backing under test

    Setups:
        - Add admin role to primary tenant (module)

    Test Steps:
        - Create flv_rootdisk without ephemeral or swap disks, and set
          storage backing extra spec
        - Create flv_ephemswap with ephemeral AND swap disks, and set
          storage backing extra spec
        - Boot following vms and wait for them to be pingable from NatBox:
            - Boot vm1 from volume with flavor flv_rootdisk
            - Boot vm2 from volume with flavor flv_ephemswap
            - Boot vm3 from image with flavor flv_rootdisk
            - Boot vm4 from image with flavor flv_rootdisk, and attach a
              volume to it
            - Boot vm5 from image with flavor flv_ephemswap
        - start upgrade ....Follows upgrade procedure
        - Ping NAT during the upgrade before live migration
        - complete upgrade

    Teardown:
        - Not complete ....Delete created vms, volumes, flavors

    """
    ProjVar.set_var(SOURCE_OPENRC=True)
    Tenant.set_primary('tenant2')

    LOG.fixture_step("Create a flavor without ephemeral or swap disks")
    flavor_1 = nova_helper.create_flavor('flv_rootdisk')[1]
    ResourceCleanup.add('flavor', flavor_1)

    LOG.fixture_step("Create another flavor with ephemeral and swap disks")
    flavor_2 = nova_helper.create_flavor('flv_ephemswap', ephemeral=1,
                                         swap=512)[1]
    ResourceCleanup.add('flavor', flavor_2)

    LOG.fixture_step("Boot vm1 from volume with flavor flv_rootdisk and "
                     "wait for it pingable from NatBox")
    vm1_name = "vol_root"
    vm1 = vm_helper.boot_vm(vm1_name, flavor=flavor_1, cleanup='function')[1]

    LOG.fixture_step("Boot vm2 from volume with flavor flv_ephemswap and "
                     "wait for it pingable from NatBox")
    vm2_name = "vol_ephemswap"
    vm2 = vm_helper.boot_vm(vm2_name, flavor=flavor_2, cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm1)
    vm_helper.wait_for_vm_pingable_from_natbox(vm2)

    LOG.fixture_step("Boot vm3 from image with flavor flv_rootdisk and "
                     "wait for it pingable from NatBox")
    vm3_name = "image_root"
    vm3 = vm_helper.boot_vm(vm3_name, flavor=flavor_1, cleanup='function')[1]

    LOG.fixture_step("Boot vm4 from image with flavor flv_rootdisk, attach "
                     "a volume to it and wait for it pingable from NatBox")
    vm4_name = 'image_root_attachvol'
    vm4 = vm_helper.boot_vm(vm4_name, flavor_1, cleanup='function')[1]

    vol = cinder_helper.create_volume(bootable=False)[1]
    ResourceCleanup.add('volume', vol)
    vm_helper.attach_vol_to_vm(vm4, vol_id=vol)

    LOG.fixture_step("Boot vm5 from image with flavor flv_ephemswap and "
                     "wait for it pingable from NatBox")
    vm5_name = 'image_ephemswap'
    vm5 = vm_helper.boot_vm(vm5_name, flavor_2, source='image',
                            cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm4)
    vm_helper.wait_for_vm_pingable_from_natbox(vm5)

    vms = [vm1, vm2, vm3, vm4, vm5]
    return vms
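

# Illustrative only: a sketch of how a test might consume vms_with_upgrade,
# checking vm connectivity around the upgrade as the fixture docstring
# describes. The test name is hypothetical and the upgrade procedure itself
# is deliberately omitted, since it is orchestrated elsewhere.
def test_upgrade_with_vms_sketch(vms_with_upgrade):
    vms = vms_with_upgrade
    LOG.tc_step("Ensure all vms are reachable before starting upgrade")
    vm_helper.ping_vms_from_natbox(vms)
    # ... upgrade procedure runs here (omitted) ...
    LOG.tc_step("Ensure all vms remain reachable after upgrade")
    vm_helper.ping_vms_from_natbox(vms)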
def test_attach_cinder_volume_to_instance(vol_vif, check_avs_pattern):
    """
    Validate that cinder volume can be attached to VM created using
    wrl5_avp and wrl5_virtio image

    Args:
        vol_vif (str)

    Test Steps:
        - Create cinder volume
        - Boot VM using WRL image
        - Attach cinder volume to WRL virtio/avp instance
        - Check VM nics vifs are not changed

    Teardown:
        - Delete VM
        - Delete cinder volume

    """
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    vif_model = 'avp' if system_helper.is_avs() else 'virtio'
    nics = [{'net-id': mgmt_net_id},
            {'net-id': tenant_net_id},
            {'net-id': internal_net_id, 'vif-model': vif_model}]

    LOG.tc_step("Boot up VM from default tis image")
    vm_id = vm_helper.boot_vm(name='vm_attach_vol_{}'.format(vol_vif),
                              source='image', nics=nics,
                              cleanup='function')[1]

    prev_ports = network_helper.get_ports(server=vm_id)

    LOG.tc_step(
        "Create an image with vif model metadata set to {}".format(vol_vif))
    img_id = glance_helper.create_image(
        'vif_{}'.format(vol_vif), cleanup='function',
        **{ImageMetadata.VIF_MODEL: vol_vif})[1]

    LOG.tc_step("Boot a volume from above image")
    volume_id = cinder_helper.create_volume('vif_{}'.format(vol_vif),
                                            source_id=img_id,
                                            cleanup='function')[1]

    # Attach the cinder volume created above to the vm
    LOG.tc_step("Attach cinder Volume to VM")
    vm_helper.attach_vol_to_vm(vm_id, vol_id=volume_id)

    LOG.tc_step("Check vm nics vif models are not changed")
    post_ports = network_helper.get_ports(server=vm_id)
    assert prev_ports == post_ports