def test_verify_backup_snapshot_removed(engine_api):
    """Verify VM2 is back to exactly one snapshot after backup cleanup."""
    snapshots_service = test_utils.get_vm_snapshots_service(
        engine_api.system_service(), VM2_NAME)
    assertions.assert_true_within_long(
        lambda: len(snapshots_service.list()) == 1)
def test_add_snapshot_for_backup(engine_api):
    """Create a diskful, memory-less snapshot of VM2 to back up from.

    Waits for the snapshot-creation events, for the engine jobs tied to
    the correlation ID to finish, and for the new snapshot to reach OK.
    """
    engine = engine_api.system_service()
    vm2_disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM2_NAME)
    # Snapshot only VM2's first attached disk; memory state is not needed
    # for a backup snapshot.
    disk = vm2_disk_attachments_service.list()[0]
    backup_snapshot_params = types.Snapshot(
        description=SNAPSHOT_FOR_BACKUP_VM,
        persist_memorystate=False,
        disk_attachments=[types.DiskAttachment(disk=types.Disk(id=disk.id))])
    vm2_snapshots_service = test_utils.get_vm_snapshots_service(
        engine, VM2_NAME)
    correlation_id = uuid.uuid4()
    with engine_utils.wait_for_event(engine, [45, 68]):
        # USER_CREATE_SNAPSHOT(45) event
        # USER_CREATE_SNAPSHOT_FINISHED_SUCCESS(68) event
        vm2_snapshots_service.add(backup_snapshot_params,
                                  query={'correlation_id': correlation_id})
        assertions.assert_true_within_long(
            lambda: test_utils.all_jobs_finished(engine, correlation_id))
        assertions.assert_true_within_long(
            lambda: vm2_snapshots_service.list()[-1].snapshot_status == types.
            SnapshotStatus.OK,
        )
def setup_virtual_machines(engine_api):
    """Ensure vm0 is starting; if it is DOWN, power it on and wait."""
    vm0 = test_utils.get_vm_service(engine_api.system_service(), 'vm0')
    if vm0.get().status != types.VmStatus.DOWN:
        return
    vm0.start()
    assertions.assert_true_within_long(
        lambda: vm0.get().status == types.VmStatus.POWERING_UP)
def test_check_snapshot_with_memory(engine_api):
    """Wait for the memory snapshot to be in preview, then boot VM0."""
    engine = engine_api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)

    def snapshot_in_preview():
        snap = test_utils.get_snapshot(engine, VM0_NAME, SNAPSHOT_DESC_MEM)
        return snap.snapshot_status == types.SnapshotStatus.IN_PREVIEW

    assertions.assert_true_within_long(snapshot_in_preview)
    vm_service.start()
    _verify_vm_state(engine, VM0_NAME, types.VmStatus.UP)
def test_update_vm_pool(engine_api):
    """Raise the pool's max_user_vms to 2 and wait for the job to finish."""
    engine = engine_api.system_service()
    pool = test_utils.get_pool_service(engine, VMPOOL_NAME)
    cid = uuid.uuid4()
    pool.update(
        pool=types.VmPool(max_user_vms=2),
        query={'correlation_id': cid},
    )
    # The update is applied synchronously on the entity itself.
    assert pool.get().max_user_vms == 2
    assertions.assert_true_within_long(
        lambda: test_utils.all_jobs_finished(engine, cid))
def is_alive(vm_name):
    """Assert the VM responds to ping and executes an SSH command."""
    def ping_once():
        ansible_host0.shell('ping -4 -c 1 -W 60 {}'.format(vm_name))
        return True

    assertions.assert_true_within_long(
        ping_once, allowed_exceptions=[ansible.AnsibleExecutionError])
    assert vm_ssh(vm_name, 'true').code == EX_OK
def test_clear_global_maintenance(ansible_host0):
    """Disable global maintenance and wait until no host reports it."""
    logging.info('Waiting For System Stability...')
    # Clearing maintenance while the HE VM migrates can race; settle first.
    he_utils.wait_until_engine_vm_is_not_migrating(ansible_host0)
    he_utils.set_and_test_global_maintenance_mode(ansible_host0, False)
    assertions.assert_true_within_long(
        lambda: he_utils.no_hosts_state_global_maintenance(ansible_host0))
    logging.info('Global maintenance state cleared on all hosts')
def test_remove_vm_pool(engine_api):
    """Remove the VM pool and verify both removal events and job completion.

    Fix: reuse the already-fetched ``engine`` system service instead of
    fetching it a second time via ``engine_api.system_service()``.
    """
    engine = engine_api.system_service()
    pool_service = test_utils.get_pool_service(engine, VMPOOL_NAME)
    correlation_id = uuid.uuid4()
    with engine_utils.wait_for_event(engine, [321, 304]):
        # USER_REMOVE_VM_POOL_INITIATED(321) event
        # USER_REMOVE_VM_POOL(304) event
        pool_service.remove(query={'correlation_id': correlation_id})
        vm_pools_service = engine.vm_pools_service()
        assert len(vm_pools_service.list()) == 0
    assertions.assert_true_within_long(
        lambda: test_utils.all_jobs_finished(engine, correlation_id))
def test_preview_snapshot_with_memory(engine_api):
    """Stop VM0 and preview its memory snapshot, restoring memory state.

    Fix: ``async`` is a reserved keyword since Python 3.7 and a
    SyntaxError here; the oVirt SDK renamed the parameter to ``async_``.
    """
    engine = engine_api.system_service()
    events = engine.events_service()
    # Wait for event 68 == USER_CREATE_SNAPSHOT_FINISHED_SUCCESS before
    # previewing, so the snapshot is fully created.
    assertions.assert_true_within_long(
        lambda: any(e.code == 68 for e in events.list(max=6)))
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    vm_service.stop()
    _verify_vm_state(engine, VM0_NAME, types.VmStatus.DOWN)
    snapshot = test_utils.get_snapshot(engine, VM0_NAME, SNAPSHOT_DESC_MEM)
    vm_service.preview_snapshot(snapshot=snapshot, async_=False,
                                restore_memory=True)
def _restart_services(ansible_host):
    """Restart the VDSM/hosted-engine HA stack on *ansible_host* and wait
    for the HA agent to report ready."""
    logging.info('Stopping services...')
    ansible_host.shell(
        'systemctl stop vdsmd supervdsmd ovirt-ha-broker ovirt-ha-agent')
    logging.info('Starting services...')
    ansible_host.shell(
        'systemctl start vdsmd supervdsmd ovirt-ha-broker ovirt-ha-agent')
    logging.info('Waiting for agent to be ready...')
    # The agent needs time to reconnect to the broker after a restart.
    assertions.assert_true_within_long(
        lambda: _ha_agent_is_ready(ansible_host))
    logging.info('Agent is ready.')
def test_suspend_resume_vm0(assert_vm_is_alive, engine_api, vm_ssh):
    """Suspend VM0 and start resuming it.

    A background job is launched first; a later test checks it survived
    the suspend/resume cycle.
    """
    # start a background job we are going to check if it's still running later
    ret = vm_ssh(VM0_NAME, 'sleep 3600 &')
    assert ret.code == EX_OK
    assert_vm_is_alive(VM0_NAME)

    vm_service = test_utils.get_vm_service(
        engine_api.system_service(), VM0_NAME)
    vm_service.suspend()
    assertions.assert_true_within_long(
        lambda: vm_service.get().status == types.VmStatus.SUSPENDED)
    vm_service.start()
def test_next_run_unplug_cpu(engine_api):
    """Schedule a CPU-socket reduction for next run and verify it applies
    only after the VM reboots."""
    engine = engine_api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    cpu = vm_service.get().cpu
    cpu.topology.sockets = 1
    vm_service.update(vm=types.Vm(cpu=cpu), next_run=True)
    # Running config is untouched; only the next-run config changed.
    assert vm_service.get().cpu.topology.sockets == 2
    assert vm_service.get(next_run=True).cpu.topology.sockets == 1
    with engine_utils.wait_for_event(engine, 157):  # USER_REBOOT_VM(157)
        vm_service.reboot()
        assertions.assert_true_within_long(
            lambda: vm_service.get().status == types.VmStatus.UP)
    assert vm_service.get().cpu.topology.sockets == 1
def test_ha_recovery(engine_api, get_ansible_host_for_vm):
    """Kill VM2's qemu process and verify HA restarts it.

    Expects VM_DOWN_ERROR(119), HA_VM_FAILED(9602) and
    VDS_INITIATED_RUN_VM(506) events, then waits for the VM to come back
    UP before stopping it.
    """
    engine = engine_api.system_service()
    with engine_utils.wait_for_event(engine, [119, 9602, 506]):
        # VM_DOWN_ERROR event(119)
        # HA_VM_FAILED event event(9602)
        # VDS_INITIATED_RUN_VM event(506)
        ansible_host = get_ansible_host_for_vm(VM2_NAME)
        # NOTE(review): assumes pgrep matches exactly one process; multiple
        # matches would yield a multi-line PID string — TODO confirm.
        pid = ansible_host.shell('pgrep -f qemu.*guest=vm2')['stdout'].strip()
        ansible_host.shell('kill -KILL {}'.format(pid))
    vm_service = test_utils.get_vm_service(engine, VM2_NAME)
    assertions.assert_true_within_long(
        lambda: vm_service.get().status == types.VmStatus.UP)
    with engine_utils.wait_for_event(engine, 33):  # USER_STOP_VM event
        vm_service.stop()
def test_reconstruct_master_domain(engine_api):
    """Deactivate the master storage domain and check that mastership
    moves to another domain, then reactivate the original."""
    pytest.skip('TODO:Handle case where tasks are running')
    system_service = engine_api.system_service()
    dc_service = test_utils.data_center_service(system_service, DC_NAME)
    attached_sds_service = dc_service.storage_domains_service()
    master_sd = next(sd for sd in attached_sds_service.list() if sd.master)
    master_sd_service = attached_sds_service.storage_domain_service(
        master_sd.id)
    master_sd_service.deactivate()
    assertions.assert_true_within_long(
        lambda: master_sd_service.get().status ==
        types.StorageDomainStatus.MAINTENANCE)
    new_master = next(sd for sd in attached_sds_service.list() if sd.master)
    assert new_master.id != master_sd.id
    master_sd_service.activate()
    assertions.assert_true_within_long(
        lambda: master_sd_service.get().status ==
        types.StorageDomainStatus.ACTIVE)
def wait_for_event(engine, event_id):
    """Run the context body, then assert the expected engine events appeared.

    event_id may be a single int event ID or a list of IDs, all of which
    must show up in the event log after the point of entry.
    """
    events = engine.events_service()
    # Remember the newest event ID so only later events are examined.
    last_event = int(events.list(max=2)[0].id)
    try:
        yield
    finally:
        expected = [event_id] if isinstance(event_id, int) else event_id
        for e_id in expected:
            assertions.assert_true_within_long(lambda: any(
                e.code == e_id for e in events.list(from_=last_event)))
def cold_storage_migration(engine_api):
    """Cold-migrate DISK2 to the iSCSI domain and back to the NFS domain.

    Fix: ``async`` is a reserved keyword since Python 3.7 and a
    SyntaxError here; the oVirt SDK renamed the parameter to ``async_``.
    """
    engine = engine_api.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK2_NAME)
    # Cold migrate the disk to ISCSI storage domain and then migrate it back
    # to the NFS domain because it is used by other cases that assume the
    # disk found on that specific domain
    for domain in [SD_ISCSI_NAME, SD_SECOND_NFS_NAME]:
        with engine_utils.wait_for_event(engine, 2008):
            # USER_MOVED_DISK(2,008)
            disk_service.move(
                async_=False,
                storage_domain=types.StorageDomain(name=domain))
            assertions.assert_true_within_long(lambda: engine_api.follow_link(
                disk_service.get().storage_domains[0]).name == domain)
            assertions.assert_true_within_long(
                lambda: disk_service.get().status == types.DiskStatus.OK)
def test_update_template_version(engine_api,
                                 cirros_image_glance_template_name):
    """Publish a new template version from the stateless VM and verify the
    pool picks up the new version's memory size."""
    engine = engine_api.system_service()
    stateless_vm = engine.vms_service().list(
        search='name={}'.format(VM1_NAME))[0]
    templates_service = engine.templates_service()
    base_template = templates_service.list(
        search='name={}'.format(cirros_image_glance_template_name))[0]
    # The stateless VM must differ from the base template for the check
    # below to be meaningful.
    assert stateless_vm.memory != base_template.memory

    new_version = types.Template(
        name=cirros_image_glance_template_name,
        vm=stateless_vm,
        version=types.TemplateVersion(
            base_template=base_template, version_number=2),
    )
    templates_service.add(template=new_version)

    pool_service = test_utils.get_pool_service(engine, VMPOOL_NAME)
    assertions.assert_true_within_long(
        lambda: pool_service.get().vm.memory == stateless_vm.memory)
def test_template_export(engine_api, cirros_image_glance_template_name):
    """Export the glance template to the export storage domain.

    Fix: the skip message referenced ``template_export.__name__``, a name
    that does not exist in this scope (the function is
    ``test_template_export``), so the skip path raised NameError instead
    of skipping.
    """
    engine = engine_api.system_service()
    template_guest = test_utils.get_template_service(
        engine, cirros_image_glance_template_name)
    if template_guest is None:
        pytest.skip('{0}: template {1} is missing'.format(
            test_template_export.__name__,
            cirros_image_glance_template_name))

    storage_domain = engine.storage_domains_service().list(
        search='name={}'.format(SD_TEMPLATES_NAME))[0]
    with engine_utils.wait_for_event(engine, 1164):
        # IMPORTEXPORT_STARTING_EXPORT_TEMPLATE event
        template_guest.export(storage_domain=types.StorageDomain(
            id=storage_domain.id, ), )
    with engine_utils.wait_for_event(engine, 1156):
        # IMPORTEXPORT_EXPORT_TEMPLATE event
        assertions.assert_true_within_long(
            lambda: template_guest.get().status == types.TemplateStatus.OK,
        )
def test_hotunplug_disk(engine_api):
    """Hot-unplug DISK0 from VM0 and verify the attachment goes inactive.

    Fix: PEP 8 — comparisons to ``False`` use ``is``, not ``==``.
    """
    engine = engine_api.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK0_NAME)
    disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM0_NAME)
    disk_attachment = disk_attachments_service.attachment_service(
        disk_service.get().id)
    with engine_utils.wait_for_event(engine, 2002):
        # USER_HOTUNPLUG_DISK(2,002)
        correlation_id = 'test_hotunplug_disk'
        assert disk_attachment.update(types.DiskAttachment(active=False),
                                      query={'correlation_id': correlation_id})
        assertions.assert_true_within_long(
            lambda: test_utils.all_jobs_finished(engine, correlation_id))
        assertions.assert_true_within_short(
            lambda: disk_service.get().status == types.DiskStatus.OK)
        assertions.assert_true_within_short(
            lambda: disk_attachment.get().active is False)
def test_remove_backup_vm_and_backup_snapshot(engine_api):
    """Power off and delete the backup VM, then remove VM2's snapshot."""
    engine = engine_api.system_service()
    backup_vm_service = test_utils.get_vm_service(engine, BACKUP_VM_NAME)
    vm2_snapshots_service = test_utils.get_vm_snapshots_service(
        engine, VM2_NAME)
    backup_snapshot = vm2_snapshots_service.list()[-1]

    # power-off backup-vm
    with engine_utils.wait_for_event(engine, [33, 61]):
        # VM_DOWN(61) event
        # USER_STOP_VM(33) event
        backup_vm_service.stop()
        assertions.assert_true_within_long(
            lambda: backup_vm_service.get().status == types.VmStatus.DOWN)

    # remove backup_vm
    vms_before = len(engine.vms_service().list())
    backup_vm_service.remove()
    assert len(engine.vms_service().list()) == vms_before - 1

    with engine_utils.wait_for_event(engine, 342):  # USER_REMOVE_SNAPSHOT event
        # remove vm2 snapshot
        vm2_snapshots_service.snapshot_service(backup_snapshot.id).remove()
def perform_vm_backup(vm_backup_service, disks_service, disk,
                      from_checkpoint_id=None, correlation_id=None):
    """Run one VM backup of *disk* and wait for it to complete.

    :param vm_backup_service: the VM's backups service
    :param disks_service: the engine's disks service
    :param disk: the disk to back up
    :param from_checkpoint_id: checkpoint to base an incremental backup
        on; None means a full backup
    :param correlation_id: correlation ID used when adding the backup
    :return: the ID of the checkpoint this backup created, usable as
        *from_checkpoint_id* of a subsequent incremental backup
    """
    backup = vm_backup_service.add(types.Backup(
        disks=[types.Disk(id=disk.id)],
        from_checkpoint_id=from_checkpoint_id),
        query={'correlation_id': correlation_id})
    backup_service = vm_backup_service.backup_service(backup.id)
    # The backup entity may briefly be unreadable right after add();
    # tolerate NotFoundError while polling for the READY phase.
    assertions.assert_true_within_long(
        lambda: backup_service.get().phase == types.BackupPhase.READY,
        allowed_exceptions=[sdk4.NotFoundError])
    backup = backup_service.get()
    created_checkpoint_id = backup.to_checkpoint_id
    backup_service.finalize()
    # Wait for the backup to disappear from the list and the disk to
    # settle back to OK before returning.
    assertions.assert_true_within_long(
        lambda: len(vm_backup_service.list()) == 0)
    assertions.assert_true_within_long(lambda: disks_service.disk_service(
        disk.id).get().status == types.DiskStatus.OK)
    return created_checkpoint_id
def test_incremental_backup_vm2(engine_api):
    """Run a full, then an incremental, backup of VM2's disk.

    Fix: the loop body duplicated ``perform_vm_backup`` verbatim (add
    backup, wait for READY, finalize, wait for cleanup); call the
    existing helper instead so the logic lives in one place.
    """
    engine = engine_api.system_service()
    disks_service = engine.disks_service()
    disk2 = disks_service.list(search='name={}'.format(DISK2_NAME))[0]
    vm2_backups_service = test_utils.get_vm_service(
        engine, VM2_NAME).backups_service()
    created_checkpoint_id = None
    # The first iteration will be a full VM backup (from_checkpoint_id=None)
    # and the second iteration will be an incremental VM backup.
    for _ in range(2):
        created_checkpoint_id = perform_vm_backup(
            vm2_backups_service, disks_service, disk2,
            from_checkpoint_id=created_checkpoint_id,
            correlation_id='test_incremental_backup')
def add(api, domain, dc_name):
    """Add a storage domain and attach it to the named data center.

    Waits for the domain to become UNATTACHED after creation, then
    ACTIVE after attachment.
    """
    system_service = api.system_service()
    sds_service = system_service.storage_domains_service()
    with engine_utils.wait_for_event(system_service, 956):
        # USER_ADD_STORAGE_DOMAIN(956)
        sd = sds_service.add(domain)
        sd_service = sds_service.storage_domain_service(sd.id)
        assertions.assert_true_within_long(
            lambda: sd_service.get().status ==
            sdk4.types.StorageDomainStatus.UNATTACHED)

    data_centers = system_service.data_centers_service()
    dc = data_centers.list(search='name={}'.format(dc_name))[0]
    dc_service = data_centers.data_center_service(dc.id)
    attached_sds_service = dc_service.storage_domains_service()
    with engine_utils.wait_for_event(system_service, [966, 962]):
        # USER_ACTIVATED_STORAGE_DOMAIN(966)
        # USER_ATTACH_STORAGE_DOMAIN_TO_POOL(962)
        attached_sds_service.add(sdk4.types.StorageDomain(id=sd.id, ), )
        attached_sd_service = attached_sds_service.storage_domain_service(
            sd.id)
        assertions.assert_true_within_long(
            lambda: attached_sd_service.get().status ==
            sdk4.types.StorageDomainStatus.ACTIVE)
def test_live_storage_migration(engine_api):
    """Live-migrate DISK0 to the iSCSI domain and verify the auto-created
    snapshot is merged away.

    Fix: ``async`` is a reserved keyword since Python 3.7 and a
    SyntaxError here; the oVirt SDK renamed the parameter to ``async_``.
    """
    engine = engine_api.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK0_NAME)
    correlation_id = 'live_storage_migration'
    disk_service.move(
        async_=False,
        filter=False,
        storage_domain=types.StorageDomain(name=SD_ISCSI_NAME),
        query={'correlation_id': correlation_id})

    assertions.assert_true_within_long(
        lambda: test_utils.all_jobs_finished(engine, correlation_id))

    # Assert that the disk is on the correct storage domain,
    # its status is OK and the snapshot created for the migration
    # has been merged
    assertions.assert_true_within_long(lambda: engine_api.follow_link(
        disk_service.get().storage_domains[0]).name == SD_ISCSI_NAME)
    vm0_snapshots_service = test_utils.get_vm_snapshots_service(
        engine, VM0_NAME)
    assertions.assert_true_within_long(
        lambda: len(vm0_snapshots_service.list()) == 1)
    assertions.assert_true_within_long(
        lambda: disk_service.get().status == types.DiskStatus.OK)
def test_local_maintenance(hosts_service, get_vm_service_for_vm,
                           ansible_host0):
    """Put the host running the HE VM into local maintenance, activate it
    again, and verify the HE VM migrated to a different host."""
    logging.info('Waiting For System Stability...')
    he_utils.wait_until_engine_vm_is_not_migrating(ansible_host0)

    vm_service = get_vm_service_for_vm(VM_HE_NAME)
    # Host currently running the hosted-engine VM.
    he_host_id = vm_service.get().host.id
    host_service = hosts_service.host_service(id=he_host_id)

    def do_verified_deactivation():
        # Retried by assert_true_within_long until the host reaches
        # MAINTENANCE; deactivate() may legitimately fail in between.
        logging.debug(f'Trying to deactivate host {host_service.get().name}')
        try:
            host_service.deactivate()
        except ovirtsdk4.Error:
            # Ignore. Just return the result and let the caller fail if needed
            pass
        status = host_service.get().status
        hosted_engine = host_service.get(all_content=True).hosted_engine
        logging.debug(f'status={status}')
        logging.debug(f'hosted_engine={_hosted_engine_info(hosted_engine)}')
        # Original test was:
        # (
        #     status == types.HostStatus.MAINTENANCE or
        #     hosted_engine.local_maintenance
        # )
        # But this does not test local_maintenance (presumably the "local
        # maintenance" status as reported by the HA daemons?).
        # So I tried to change the "or" to "and" (require both), and it
        # never happened - local_maintenance always remained False.
        # Giving up on this for now and checking only status.
        # TODO: Find out why, fix what's needed, change the code to require
        # both. Also for do_verified_activation below.
        return status == types.HostStatus.MAINTENANCE

    logging.info('Performing Deactivation...')
    assertions.assert_true_within_long(do_verified_deactivation)

    def do_verified_activation():
        # Mirror image of do_verified_deactivation: retried until UP.
        logging.info(f'Trying to activate host {host_service.get().name}')
        try:
            host_service.activate()
        except ovirtsdk4.Error:
            # Ignore. Just return the result and let the caller fail if needed
            pass
        status = host_service.get().status
        hosted_engine = host_service.get(all_content=True).hosted_engine
        logging.debug(f'status={status}')
        logging.debug(f'hosted_engine={_hosted_engine_info(hosted_engine)}')
        # TODO See comment above
        return status == types.HostStatus.UP

    logging.info('Performing Activation...')
    assertions.assert_true_within_long(do_verified_activation)

    logging.info('Verifying that all hosts have score higher than 0...')
    assertions.assert_true_within_long(
        lambda: host_service.get(all_content=True).hosted_engine.score > 0
    )

    logging.info('Validating Migration...')
    # The HE VM must have moved off the host that went into maintenance.
    prev_host_id = he_host_id
    he_host_id = vm_service.get().host.id
    assert prev_host_id != he_host_id
def wait_until_engine_vm_is_not_migrating(ansible_host):
    """Block until the hosted-engine VM is no longer migrating."""
    def migration_finished():
        return not engine_vm_is_migrating(ansible_host)

    assertions.assert_true_within_long(migration_finished)
def _verify_vm_state(engine, vm_name, state):
    """Wait until *vm_name* reaches *state*; return its VM service."""
    service = test_utils.get_vm_service(engine, vm_name)
    assertions.assert_true_within_long(
        lambda: service.get().status == state)
    return service
def snapshot_cold_merge(engine_api):
    """Create two cold snapshots on VM1, then remove the older one and
    verify the cold merge completes.

    Improvement: the two snapshot-creation sequences were identical
    copy-paste; they are extracted into a private helper.
    """
    engine = engine_api.system_service()
    vm1_snapshots_service = test_utils.get_vm_snapshots_service(
        engine, VM1_NAME)
    if vm1_snapshots_service is None:
        pytest.skip('Glance is not available')
    disk = engine.disks_service().list(
        search='name={} and vm_names={}'.format(DISK1_NAME, VM1_NAME))[0]

    def _take_snapshot(description):
        # Create a memory-less snapshot of `disk` and wait for OK status.
        params = types.Snapshot(
            description=description,
            persist_memorystate=False,
            disk_attachments=[
                types.DiskAttachment(disk=types.Disk(id=disk.id))])
        correlation_id = uuid.uuid4()
        vm1_snapshots_service.add(
            params, query={'correlation_id': correlation_id})
        assertions.assert_true_within_long(
            lambda: test_utils.all_jobs_finished(engine, correlation_id))
        assertions.assert_true_within_long(
            lambda: vm1_snapshots_service.list()[-1].snapshot_status ==
            types.SnapshotStatus.OK)

    _take_snapshot(SNAPSHOT_DESC_1)
    _take_snapshot(SNAPSHOT_DESC_2)

    # Remove the first (older) of the two snapshots and wait for the
    # cold merge to finish.
    snapshot = vm1_snapshots_service.list()[-2]
    vm1_snapshots_service.snapshot_service(snapshot.id).remove()
    assertions.assert_true_within_long(
        lambda: len(vm1_snapshots_service.list()) == 2)
    assertions.assert_true_within_long(
        lambda: vm1_snapshots_service.list()[-1].snapshot_status ==
        types.SnapshotStatus.OK)
def test_verify_vm_import(engine_api):
    """Wait for the OVA import job to finish and expect the VM to be DOWN."""
    engine = engine_api.system_service()
    cid = "test_validate_ova_import_vm"
    assertions.assert_true_within_long(
        lambda: test_utils.all_jobs_finished(engine, cid))
    _verify_vm_state(engine, IMPORTED_VM_NAME, types.VmStatus.DOWN)
def test_verify_vm1_exported(engine_api):
    """VM1 should be left with exactly one snapshot after the export."""
    snapshots_service = test_utils.get_vm_snapshots_service(
        engine_api.system_service(), VM1_NAME)
    assertions.assert_true_within_long(
        lambda: len(snapshots_service.list()) == 1,
    )