def test_sparsify_disk1(engine_api):
    """Sparsify DISK1 and verify the disk returns to the OK status."""
    system = engine_api.system_service()
    disk_svc = test_utils.get_disk_service(system, DISK1_NAME)
    # USER_SPARSIFY_IMAGE_START event
    with engine_utils.wait_for_event(system, 1325):
        disk_svc.sparsify()
    # USER_SPARSIFY_IMAGE_FINISH_SUCCESS event - nothing to trigger here,
    # just wait until the engine reports the operation finished.
    with engine_utils.wait_for_event(system, 1326):
        pass
    # Make sure disk is unlocked
    assert disk_svc.get().status == types.DiskStatus.OK
def test_ha_recovery(engine_api, get_ansible_host_for_vm):
    """Kill VM2's qemu process on the host and verify HA restarts it.

    The qemu process is killed while waiting for the engine events that
    mark the failure and the HA-initiated restart, then the VM is
    confirmed UP again and finally stopped.
    """
    engine = engine_api.system_service()
    with engine_utils.wait_for_event(engine, [119, 9602, 506]):
        # VM_DOWN_ERROR event(119)
        # HA_VM_FAILED event(9602)
        # VDS_INITIATED_RUN_VM event(506)
        ansible_host = get_ansible_host_for_vm(VM2_NAME)
        # Kill the hypervisor-side qemu process to simulate a VM crash.
        pid = ansible_host.shell('pgrep -f qemu.*guest=vm2')['stdout'].strip()
        ansible_host.shell('kill -KILL {}'.format(pid))
    vm_service = test_utils.get_vm_service(engine, VM2_NAME)
    # HA policy must bring the VM back up on its own.
    assertions.assert_true_within_long(
        lambda: vm_service.get().status == types.VmStatus.UP)
    with engine_utils.wait_for_event(engine, 33):  # USER_STOP_VM event
        vm_service.stop()
def test_add_snapshot_for_backup(engine_api):
    """Create a diskless-memory snapshot of VM2's first disk for backup.

    Waits for both snapshot-creation events, then polls until the
    snapshot job finishes and the snapshot status is OK.
    """
    engine = engine_api.system_service()

    vm2_disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM2_NAME)
    disk = vm2_disk_attachments_service.list()[0]

    backup_snapshot_params = types.Snapshot(
        description=SNAPSHOT_FOR_BACKUP_VM,
        persist_memorystate=False,
        disk_attachments=[types.DiskAttachment(disk=types.Disk(id=disk.id))])

    vm2_snapshots_service = test_utils.get_vm_snapshots_service(
        engine, VM2_NAME)

    correlation_id = uuid.uuid4()
    with engine_utils.wait_for_event(engine, [45, 68]):
        # USER_CREATE_SNAPSHOT(45) event
        # USER_CREATE_SNAPSHOT_FINISHED_SUCCESS(68) event
        vm2_snapshots_service.add(backup_snapshot_params,
                                  query={'correlation_id': correlation_id})

    assertions.assert_true_within_long(
        lambda: test_utils.all_jobs_finished(engine, correlation_id))
    assertions.assert_true_within_long(
        lambda: vm2_snapshots_service.list()[-1].snapshot_status == types.
        SnapshotStatus.OK,
    )
def test_list_glance_images(engine_api):
    """Ensure the GLANCE storage domain exists, is reachable and has images.

    The GLANCE domain is added on the fly when it is missing; connectivity
    and image listing failures are surfaced as RuntimeError so the test
    fails with a clear message.
    """
    search_query = 'name={}'.format(SD_GLANCE_NAME)
    system_service = engine_api.system_service()
    storage_domains_service = system_service.storage_domains_service()
    glance_domain_list = storage_domains_service.list(search=search_query)

    if not glance_domain_list:
        # The GLANCE provider is not registered yet - register it now
        # and re-run the search.
        openstack_glance = glance.add_domain(system_service, SD_GLANCE_NAME,
                                             GLANCE_SERVER_URL)
        if not openstack_glance:
            raise RuntimeError('GLANCE storage domain is not available.')
        glance_domain_list = storage_domains_service.list(search=search_query)

    if not glance.check_connectivity(system_service, SD_GLANCE_NAME):
        raise RuntimeError('GLANCE connectivity test failed')

    glance_domain = glance_domain_list.pop()
    glance_domain_service = storage_domains_service.storage_domain_service(
        glance_domain.id)

    try:
        with engine_utils.wait_for_event(system_service, 998):
            all_images = glance_domain_service.images_service().list()
        # Idiomatic emptiness check (was: `if not len(all_images)`).
        if not all_images:
            raise RuntimeError('No GLANCE images available')
    except sdk4.Error as e:
        # Chain the SDK error so the original traceback is preserved.
        raise RuntimeError(
            'GLANCE is not available: client request error') from e
def test_add_ldap_group(engine_api):
    """Create the LDAP-backed group from the AAA authz provider."""
    system = engine_api.system_service()
    new_group = types.Group(
        name=AAA_LDAP_GROUP,
        domain=types.Domain(name=AAA_LDAP_AUTHZ_PROVIDER),
    )
    # USER_ADD(149)
    with engine_utils.wait_for_event(system, 149):
        system.groups_service().add(new_group)
def test_add_ldap_user(engine_api):
    """Create the LDAP-backed user from the AAA authz provider."""
    system = engine_api.system_service()
    new_user = types.User(
        user_name=AAA_LDAP_USER,
        domain=types.Domain(name=AAA_LDAP_AUTHZ_PROVIDER),
    )
    # USER_ADD(149)
    with engine_utils.wait_for_event(system, 149):
        system.users_service().add(new_user)
def test_export_vm1(engine_api):
    """Start an asynchronous export of VM1 to an OVA file on a host."""
    engine = engine_api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM1_NAME)
    host = test_utils.get_first_active_host_by_name(engine)
    # IMPORTEXPORT_STARTING_EXPORT_VM_TO_OVA event
    with engine_utils.wait_for_event(engine, 1223):
        # 'async' became a reserved keyword in Python 3.7, making the
        # original `async=True` a SyntaxError; the oVirt SDK renamed the
        # parameter to 'async_'.
        vm_service.export_to_path_on_host(host=types.Host(id=host.id),
                                          directory=OVA_DIR,
                                          filename=OVA_VM_EXPORT_NAME,
                                          async_=True)
def test_template_export(engine_api, cirros_image_glance_template_name):
    """Export the cirros template to the templates storage domain.

    Skips when the template is missing, then waits for the export to
    start and for the template to return to the OK status.
    """
    engine = engine_api.system_service()

    template_guest = test_utils.get_template_service(
        engine, cirros_image_glance_template_name)
    if template_guest is None:
        # BUG FIX: the skip message referenced the undefined name
        # `template_export`, raising NameError instead of skipping.
        pytest.skip('{0}: template {1} is missing'.format(
            test_template_export.__name__, cirros_image_glance_template_name))

    storage_domain = engine.storage_domains_service().list(
        search='name={}'.format(SD_TEMPLATES_NAME))[0]
    with engine_utils.wait_for_event(engine, 1164):
        # IMPORTEXPORT_STARTING_EXPORT_TEMPLATE event
        template_guest.export(storage_domain=types.StorageDomain(
            id=storage_domain.id, ), )
    with engine_utils.wait_for_event(engine, 1156):
        # IMPORTEXPORT_EXPORT_TEMPLATE event
        assertions.assert_true_within_long(
            lambda: template_guest.get().status == types.TemplateStatus.OK, )
def test_remove_backup_vm_and_backup_snapshot(engine_api):
    """Power off and delete the backup VM, then drop VM2's backup snapshot."""
    system = engine_api.system_service()
    backup_vm = test_utils.get_vm_service(system, BACKUP_VM_NAME)
    snapshots = test_utils.get_vm_snapshots_service(system, VM2_NAME)
    backup_snapshot = snapshots.list()[-1]

    # power-off backup-vm
    # USER_STOP_VM(33) and VM_DOWN(61) events
    with engine_utils.wait_for_event(system, [33, 61]):
        backup_vm.stop()
        assertions.assert_true_within_long(
            lambda: backup_vm.get().status == types.VmStatus.DOWN)

    # remove backup_vm and check the VM count dropped by exactly one
    vms_before = len(system.vms_service().list())
    backup_vm.remove()
    assert len(system.vms_service().list()) == (vms_before - 1)

    # USER_REMOVE_SNAPSHOT event
    with engine_utils.wait_for_event(system, 342):
        # remove vm2 snapshot
        snapshots.snapshot_service(backup_snapshot.id).remove()
def _rollback_to_previous_layer_and_reboot(engine, ansible_host):
    """Roll the host back to its previous imgbase layer and reboot it.

    After triggering the reboot, waits for the engine to re-detect the
    host (VDS_DETECTED event).
    """
    LOGGER.info("return to previous release")
    ansible_host.shell("imgbase rollback")
    # consider replacing that with moving to maintenance and reboot
    # and host is in a state engine cannot check updates
    try:
        LOGGER.info("rebooting host")
        ansible_host.shell("systemctl reboot")
    except Exception:
        # NOTE(review): presumably the reboot drops the connection, which
        # surfaces here as an exception - hence the deliberately broad
        # except; confirm against the ansible_host.shell implementation.
        with engine_utils.wait_for_event(engine, [13]):  # VDS_DETECTED 13
            LOGGER.info("waiting for host to be up..")
def test_remove_vm_pool(engine_api):
    """Remove the VM pool and wait until all its removal jobs finish."""
    system = engine_api.system_service()
    pool = test_utils.get_pool_service(system, VMPOOL_NAME)
    correlation_id = uuid.uuid4()
    # USER_REMOVE_VM_POOL_INITIATED(321) event
    # USER_REMOVE_VM_POOL(304) event
    with engine_utils.wait_for_event(system, [321, 304]):
        pool.remove(query={'correlation_id': correlation_id})
        vm_pools_service = engine_api.system_service().vm_pools_service()
        assert not vm_pools_service.list()
    assertions.assert_true_within_long(
        lambda: test_utils.all_jobs_finished(system, correlation_id))
def test_hotplug_cpu(engine_api, vm_ssh):
    """Hot-plug a second CPU socket into VM0 and verify it inside the guest."""
    system = engine_api.system_service()
    vm = test_utils.get_vm_service(system, VM0_NAME)
    cpu = vm.get().cpu
    cpu.topology.sockets = 2
    # HOT_SET_NUMBER_OF_CPUS(2,033)
    with engine_utils.wait_for_event(system, 2033):
        vm.update(vm=types.Vm(cpu=cpu))
    assert vm.get().cpu.topology.sockets == 2

    # Cross-check with what the guest OS actually sees.
    result = vm_ssh(VM0_NAME, 'lscpu')
    assert result.code == 0
    match = re.search(r'CPU\(s\):\s+(?P<cpus>[0-9]+)',
                      result.out.decode('utf-8'))
    assert match.group('cpus') == '2'
def test_next_run_unplug_cpu(engine_api):
    """Schedule a CPU socket unplug for the next run and apply it by reboot."""
    system = engine_api.system_service()
    vm = test_utils.get_vm_service(system, VM0_NAME)
    cpu = vm.get().cpu
    cpu.topology.sockets = 1
    vm.update(vm=types.Vm(cpu=cpu, ), next_run=True)

    # The change is only pending: the running config still has 2 sockets,
    # while the next-run config already has 1.
    assert vm.get().cpu.topology.sockets == 2
    assert vm.get(next_run=True).cpu.topology.sockets == 1

    # USER_REBOOT_VM(157)
    with engine_utils.wait_for_event(system, 157):
        vm.reboot()
        assertions.assert_true_within_long(
            lambda: vm.get().status == types.VmStatus.UP)
    assert vm.get().cpu.topology.sockets == 1
def test_hotunplug_memory(assert_vm_is_alive, engine_api,
                          get_vm_libvirt_memory_amount, hotplug_mem_amount):
    """Hot-unplug memory from VM0 and verify the engine and libvirt agree."""
    system = engine_api.system_service()
    vm = test_utils.get_vm_service(system, VM0_NAME)
    shrunk_memory = vm.get().memory - hotplug_mem_amount
    # MEMORY_HOT_UNPLUG_SUCCESSFULLY_REQUESTED(2,046)
    with engine_utils.wait_for_event(system, 2046):
        vm.update(
            vm=types.Vm(memory=shrunk_memory,
                        memory_policy=types.MemoryPolicy(
                            guaranteed=shrunk_memory, )))
        assert vm.get().memory == shrunk_memory
        assert_vm_is_alive(VM0_NAME)
    assert get_vm_libvirt_memory_amount(VM0_NAME) // KB == shrunk_memory // MB
def _perform_update():
    """Check for and apply updates on every host, then verify the image.

    Runs upgrade_check and upgrade(reboot=True) on each host, waits for
    all hosts to come back UP, and asserts each host now carries the
    expected node image version. Returns True on success so the helper
    can be polled by the caller.
    """
    host_list = hosts.list()
    # BUG FIX: the original message claimed a "timout" but formatted the
    # host list; log what is actually being printed.
    LOGGER.info("_perform_update called on hosts {}".format(host_list))
    for host in host_list:
        host_service = hosts.host_service(host.id)
        LOGGER.info("_perform_update on host id:{}".format(host.id))
        with engine_utils.wait_for_event(engine, [884, 885], _timeout):
            LOGGER.info("upgrade check")
            # HOST_AVAILABLE_UPDATES_STARTED(884)
            # HOST_AVAILABLE_UPDATES_FINISHED(885)
            host_service.upgrade_check()
        with engine_utils.wait_for_event(engine, [15, 840, 888], _timeout):
            LOGGER.info("update")
            # HOST_UPGRADE_FINISHED_AND_WILL_BE_REBOOTED(888)
            # HOST_UPGRADE_STARTED(840)
            # VDS_MAINTENANCE(15)
            host_service.upgrade(reboot=True)
        LOGGER.info("upgrade process finished")
    _wait_for_status(hosts, DC_NAME, types.HostStatus.UP)
    LOGGER.info("hosts are up after upgrade")
    for host in host_list:
        ansible_host = ansible_by_hostname(host.name)
        # The node image writes the applied update name to this marker file.
        new_ver = ansible_host.shell(
            "cat /var/imgbased/.image-updated |"
            "grep -Po '(?<=update-).*(?=.squashfs.img)'")
        LOGGER.info(
            "{} upgraded to: {}".format(host.name, new_ver['stdout_lines']))
        assert new_ver['stdout_lines'] == [versions_available['node']]
    return True
def add(api, domain, dc_name):
    """Add storage domain `domain` and attach it to data center `dc_name`.

    First creates the domain and waits for it to reach UNATTACHED, then
    attaches it to the data center and waits for it to become ACTIVE,
    listening for the corresponding engine events at each phase.
    """
    system_service = api.system_service()
    sds_service = system_service.storage_domains_service()
    with engine_utils.wait_for_event(system_service, 956):
        # USER_ADD_STORAGE_DOMAIN(956)
        sd = sds_service.add(domain)

    sd_service = sds_service.storage_domain_service(sd.id)
    # A freshly created domain first shows up as UNATTACHED.
    assertions.assert_true_within_long(lambda: sd_service.get(
    ).status == sdk4.types.StorageDomainStatus.UNATTACHED)

    data_centers = system_service.data_centers_service()
    dc = data_centers.list(search='name={}'.format(dc_name))[0]
    dc_service = data_centers.data_center_service(dc.id)
    attached_sds_service = dc_service.storage_domains_service()

    with engine_utils.wait_for_event(system_service, [966, 962]):
        # USER_ACTIVATED_STORAGE_DOMAIN(966)
        # USER_ATTACH_STORAGE_DOMAIN_TO_POOL(962)
        attached_sds_service.add(sdk4.types.StorageDomain(id=sd.id, ), )
        attached_sd_service = attached_sds_service.storage_domain_service(
            sd.id)
        # Attaching activates the domain; wait until it is ACTIVE.
        assertions.assert_true_within_long(lambda: attached_sd_service.get(
        ).status == sdk4.types.StorageDomainStatus.ACTIVE)
def cold_storage_migration(engine_api):
    """Cold-migrate DISK2 to the iSCSI domain and then back to NFS."""
    engine = engine_api.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK2_NAME)

    # Cold migrate the disk to ISCSI storage domain and then migrate it back
    # to the NFS domain because it is used by other cases that assume the
    # disk found on that specific domain
    for domain in [SD_ISCSI_NAME, SD_SECOND_NFS_NAME]:
        with engine_utils.wait_for_event(engine, 2008):
            # USER_MOVED_DISK(2,008)
            # 'async' became a reserved keyword in Python 3.7, making the
            # original `async=False` a SyntaxError; the oVirt SDK renamed
            # the parameter to 'async_'.
            disk_service.move(async_=False,
                              storage_domain=types.StorageDomain(name=domain))

            assertions.assert_true_within_long(lambda: engine_api.follow_link(
                disk_service.get().storage_domains[0]).name == domain)
            assertions.assert_true_within_long(
                lambda: disk_service.get().status == types.DiskStatus.OK)
def test_add_vm_pool(engine_api, cirros_image_glance_template_name):
    """Create a VM pool from the cirros template and wait for its first VM."""
    system = engine_api.system_service()
    cluster = system.clusters_service().list(
        search='name={}'.format(TEST_CLUSTER))[0]
    template = system.templates_service().list(
        search='name={}'.format(cirros_image_glance_template_name))[0]
    new_pool = types.VmPool(
        name=VMPOOL_NAME,
        cluster=cluster,
        template=template,
        use_latest_template_version=True,
    )
    # engine event 302
    with engine_utils.wait_for_event(system, 302):
        system.vm_pools_service().add(pool=new_pool)
    pooled_vm = test_utils.get_vm_service(system, VMPOOL_NAME + '-1')
    # The pooled VM may not exist yet, hence IndexError is tolerated
    # while polling.
    assertions.assert_true_within_short(
        lambda: pooled_vm.get().status == types.VmStatus.DOWN,
        allowed_exceptions=[IndexError])
def test_extend_disk1(engine_api):
    """Grow DISK1 on VM1 to 2 GB and wait for the resize to complete."""
    system = engine_api.system_service()
    attachments = test_utils.get_disk_attachments_service(system, VM1_NAME)
    for attachment in attachments.list():
        disk = engine_api.follow_link(attachment.disk)
        if disk.name != DISK1_NAME:
            continue
        attachment_service = attachments.attachment_service(attachment.id)
        # USER_EXTEND_DISK_SIZE_SUCCESS(371)
        with engine_utils.wait_for_event(system, 371):
            attachment_service.update(
                types.DiskAttachment(
                    disk=types.Disk(provisioned_size=2 * GB, )))

            disk_service = test_utils.get_disk_service(system, DISK1_NAME)
            assertions.assert_true_within_short(
                lambda: disk_service.get().status == types.DiskStatus.OK)
            assertions.assert_true_within_short(
                lambda: disk_service.get().provisioned_size == 2 * GB)
def test_hotplug_memory(assert_vm_is_alive, engine_api,
                        get_vm_libvirt_memory_amount, hotplug_mem_amount):
    """Hot-plug `hotplug_mem_amount` bytes of memory into VM0.

    Verifies the engine reports the new size, the guest is still alive,
    and libvirt's view of the memory matches the engine's.
    """
    engine = engine_api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    new_memory = vm_service.get().memory + hotplug_mem_amount
    with engine_utils.wait_for_event(engine, 2039):  # HOT_SET_MEMORY(2,039)
        vm_service.update(vm=types.Vm(
            memory=new_memory,
            # Need to avoid OOM scenario where ballooning would immediately try to claim some memory.
            # CirrOS is lacking memory onlining rules so the guest memory doesn't really increase and
            # balloon inflation just crashes the guest instead. Balloon gets inflated because MOM
            # does not know that guest size didn't increase and just assumes it did, and the host
            # OST VM is likely under memory pressure, there's not much free RAM in OST environment.
            # Setting minimum guaranteed to new memory size keeps MOM from inflating balloon.
            memory_policy=types.MemoryPolicy(guaranteed=new_memory, )))
        assert vm_service.get().memory == new_memory
        assert_vm_is_alive(VM0_NAME)
    assert get_vm_libvirt_memory_amount(VM0_NAME) // KB == new_memory // MB
def test_make_snapshot_with_memory(engine_api):
    """Start creating a snapshot of VM0 that includes the memory state.

    LUN-backed disks cannot take part in a snapshot, so they are
    filtered out of the disk-attachment list.
    """
    system = engine_api.system_service()
    vm = test_utils.get_vm_service(system, VM0_NAME)
    disks_service = system.disks_service()
    attachments = test_utils.get_disk_attachments_service(system, VM0_NAME)

    snapshot_disks = [
        types.DiskAttachment(disk=types.Disk(id=disk.id))
        for disk in (disks_service.disk_service(att.disk.id).get()
                     for att in attachments.list())
        if disk.storage_type != types.DiskStorageType.LUN
    ]

    snapshot_params = types.Snapshot(description=SNAPSHOT_DESC_MEM,
                                     persist_memorystate=True,
                                     disk_attachments=snapshot_disks)
    # USER_CREATE_SNAPSHOT event
    with engine_utils.wait_for_event(system, 45):
        vm.snapshots_service().add(snapshot_params)
def test_attach_snapshot_to_backup_vm(engine_api):
    """Attach VM2's latest snapshot disk to the backup VM."""
    system = engine_api.system_service()
    vm2_snapshots = test_utils.get_vm_snapshots_service(system, VM2_NAME)
    vm2_disk = test_utils.get_disk_attachments_service(
        system, VM2_NAME).list()[0]
    backup_attachments = test_utils.get_disk_attachments_service(
        system, BACKUP_VM_NAME)

    # USER_ATTACH_DISK_TO_VM event
    with engine_utils.wait_for_event(system, 2016):
        backup_attachments.add(
            types.DiskAttachment(
                disk=types.Disk(
                    id=vm2_disk.id,
                    snapshot=types.Snapshot(
                        id=vm2_snapshots.list()[-1].id)),
                interface=types.DiskInterface.VIRTIO_SCSI,
                bootable=False,
                active=True))

        assert len(backup_attachments.list()) > 0
def test_hotunplug_disk(engine_api):
    """Hot-unplug DISK0 from VM0 and verify the attachment goes inactive."""
    engine = engine_api.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK0_NAME)
    disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM0_NAME)
    disk_attachment = disk_attachments_service.attachment_service(
        disk_service.get().id)
    with engine_utils.wait_for_event(engine, 2002):
        # USER_HOTUNPLUG_DISK(2,002)
        correlation_id = 'test_hotunplug_disk'
        assert disk_attachment.update(types.DiskAttachment(active=False),
                                      query={'correlation_id': correlation_id})

        assertions.assert_true_within_long(
            lambda: test_utils.all_jobs_finished(engine, correlation_id))

        assertions.assert_true_within_short(
            lambda: disk_service.get().status == types.DiskStatus.OK)

        # `is False` instead of `== False` (PEP 8 / flake8 E712): the
        # attachment must be explicitly inactive, not merely falsy.
        assertions.assert_true_within_short(
            lambda: disk_attachment.get().active is False)
def test_import_vm1(engine_api):
    """Start an asynchronous import of the exported OVA as a new VM."""
    engine = engine_api.system_service()
    sd = engine.storage_domains_service().list(
        search='name={}'.format(SD_ISCSI_NAME))[0]
    cluster = engine.clusters_service().list(
        search='name={}'.format(TEST_CLUSTER))[0]
    imports_service = engine.external_vm_imports_service()
    host = test_utils.get_first_active_host_by_name(engine)
    correlation_id = "test_validate_ova_import_vm"

    with engine_utils.wait_for_event(engine, 1165):
        # IMPORTEXPORT_STARTING_IMPORT_VM
        # 'async' became a reserved keyword in Python 3.7, making the
        # original `async=True` a SyntaxError; the oVirt SDK renamed the
        # parameter to 'async_'.
        imports_service.add(types.ExternalVmImport(
            name=IMPORTED_VM_NAME,
            provider=types.ExternalVmProviderType.KVM,
            url=IMPORTED_OVA_NAME,
            cluster=types.Cluster(id=cluster.id),
            storage_domain=types.StorageDomain(id=sd.id),
            host=types.Host(id=host.id),
            sparse=True),
            async_=True,
            query={'correlation_id': correlation_id})