def _verify_vm_state(engine, vm_name, state):
    """Wait (long timeout) until VM ``vm_name`` reports ``state``.

    Returns the VM service handle so callers can keep using it.
    """
    service = test_utils.get_vm_service(engine, vm_name)

    def _reached_state():
        return service.get().status == state

    testlib.assert_true_within_long(_reached_state)
    return service
def verify_add_hosts_4(api):
    """Wait until at least one host of DC_NAME comes up (v4 API)."""
    hosts_service = api.system_service().hosts_service()
    dc_hosts = hosts_service.list(
        search='datacenter={}'.format(DC_NAME)
    )
    testlib.assert_true_within_long(
        lambda: _single_host_up(hosts_service, dc_hosts)
    )
def restore_vm0_networking(ovirt_prefix):
    """Revive VM0 guest networking after a suspend/resume cycle.

    If SSH already works, nothing is done. Otherwise the guest OS is
    rebooted via virsh/ACPI (not a full VM restart) and the function waits
    until ping and SSH succeed and the engine reports the VM as UP.
    """
    # Networking may not work after resume. We need this pseudo-test for the
    # purpose of reviving VM networking by rebooting the VM. We must be
    # careful to reboot just the guest OS, not to restart the whole VM, to keep
    # checking for contingent failures after resume.
    # A better solution might be using a guest OS other than Cirros.
    try:
        if _vm_ssh(ovirt_prefix, VM0_NAME, ['true'], tries=1).code == 0:
            return
    # Fall back to RuntimeError if the ssh module lacks the Lago exception.
    except getattr(ssh, 'LagoSSHTimeoutException', RuntimeError):
        # May happen on timeout, e.g. when networking is not working at all.
        pass
    host = _vm_host(ovirt_prefix, VM0_NAME)
    uri = 'qemu+tls://%s/system' % host.name()
    # ACPI mode reboots only the guest OS, keeping the VM process intact.
    ret = host.ssh(['virsh', '-c', uri, 'reboot', '--mode', 'acpi', VM0_NAME])
    nt.assert_equals(ret.code, EX_OK)
    testlib.assert_true_within_long(
        lambda: _ping(ovirt_prefix, VM0_PING_DEST) == EX_OK
    )
    engine = ovirt_prefix.virt_env.engine_vm().get_api_v4().system_service()
    _verify_vm_state(engine, VM0_NAME, types.VmStatus.UP)
    nt.assert_equals(_vm_ssh(ovirt_prefix, VM0_NAME, ['true']).code, 0)
def template_export(api):
    """Export TEMPLATE_CIRROS to the templates storage domain (v4 SDK).

    Skips when the template is missing; otherwise checks both export
    audit-log events and waits for the template to return to OK.
    """
    engine = api.system_service()
    template_cirros = test_utils.get_template_service(engine, TEMPLATE_CIRROS)
    if template_cirros is None:
        raise SkipTest('{0}: template {1} is missing'.format(
            template_export.__name__,
            TEMPLATE_CIRROS
            )
        )
    storage_domain = engine.storage_domains_service().list(search='name={}'.format(SD_TEMPLATES_NAME))[0]
    with test_utils.TestEvent(engine, 1164):
        # IMPORTEXPORT_STARTING_EXPORT_TEMPLATE event
        template_cirros.export(
            storage_domain=types.StorageDomain(
                id=storage_domain.id,
            ),
        )
    with test_utils.TestEvent(engine, 1156):
        # IMPORTEXPORT_EXPORT_TEMPLATE event
        testlib.assert_true_within_long(
            lambda: template_cirros.get().status == types.TemplateStatus.OK,
        )
def add_vm_template(api):
    """Add VM1 from TEMPLATE_CENTOS7 (currently disabled).

    NOTE(review): the unconditional SkipTest makes everything below it dead
    code; it is kept for when exported-domain generation is fixed.
    """
    #TODO: Fix the exported domain generation
    raise SkipTest('Exported domain generation not supported yet')
    vm_params = params.VM(
        name=VM1_NAME,
        memory=512 * MB,
        cluster=params.Cluster(
            name=TEST_CLUSTER,
        ),
        template=params.Template(
            name=TEMPLATE_CENTOS7,
        ),
        display=params.Display(
            type_='spice',
        ),
    )
    api.vms.add(vm_params)
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).status.state == 'down',
    )
    disk_name = api.vms.get(VM1_NAME).disks.list()[0].name
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).disks.get(disk_name).status.state == 'ok'
    )
def test_initialize_engine(prefix):
    """Run engine-setup offline with the suite answer file and wait for the
    ovirt-engine service to come up.

    Fix: a non-zero engine-setup exit code was silently *returned* — test
    runners ignore return values, so a failed setup only surfaced later as
    an unrelated timeout. Raise at the point of failure instead.
    """
    engine = prefix.virt_env.engine_vm()
    answer_file_src = os.path.join(
        os.environ.get('SUITE'),
        'engine-answer-file.conf'
    )
    engine.copy_to(
        answer_file_src,
        '/tmp/answer-file',
    )
    result = engine.ssh(
        [
            'engine-setup',
            '--config-append=/tmp/answer-file',
            '--offline'
        ],
    )
    if result.code != 0:
        raise RuntimeError(
            'engine-setup failed. Exit code is %s' % result.code
        )
    testlib.assert_true_within_long(
        lambda: engine.service('ovirt-engine').alive()
    )
def _add_storage_domain_4(api, p):
    """Add storage domain ``p`` (v4 SDK), wait for UNATTACHED, attach it to
    DC_NAME and wait for ACTIVE."""
    sds_service = api.system_service().storage_domains_service()
    new_sd = sds_service.add(p)
    new_sd_service = sds_service.storage_domain_service(new_sd.id)
    testlib.assert_true_within_long(
        lambda: new_sd_service.get().status ==
        sdk4.types.StorageDomainStatus.UNATTACHED
    )
    dcs_service = api.system_service().data_centers_service()
    dc = dcs_service.list(search='name=%s' % DC_NAME)[0]
    dc_service = dcs_service.data_center_service(dc.id)
    attached_sds_service = dc_service.storage_domains_service()
    attached_sds_service.add(
        sdk4.types.StorageDomain(
            id=new_sd.id,
        ),
    )
    attached_sd_service = attached_sds_service.storage_domain_service(new_sd.id)
    testlib.assert_true_within_long(
        lambda: attached_sd_service.get().status ==
        sdk4.types.StorageDomainStatus.ACTIVE
    )
def vm_run(prefix):
    """Start VM0 (v3 SDK), pinned to the alphabetically-first host, with
    cloud-init, and wait until the engine reports it 'up'."""
    engine = prefix.virt_env.engine_vm()
    api = engine.get_api()
    host_names = [h.name() for h in prefix.virt_env.host_vms()]
    start_params = params.Action(
        use_cloud_init=True,
        vm=params.VM(
            # Pin to a deterministic host so later tests know where VM0 runs.
            placement_policy=params.VmPlacementPolicy(
                host=params.Host(
                    name=sorted(host_names)[0]
                ),
            ),
            initialization=params.Initialization(
                domain=params.Domain(
                    name='lago.example.com'
                ),
                cloud_init=params.CloudInit(
                    host=params.Host(
                        address='VM0'
                    ),
                ),
            ),
        ),
    )
    api.vms.get(VM0_NAME).start(start_params)
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def generic_import_from_glance(api, image_name=CIRROS_IMAGE_NAME,
                               as_template=False,
                               image_ext='_glance_disk',
                               template_ext='_glance_template',
                               dest_storage_domain=MASTER_SD_TYPE,
                               dest_cluster=CLUSTER_NAME):
    """Import ``image_name`` from the Glance provider (v3 SDK).

    Imports the image as a disk (and optionally as a template) into
    ``dest_storage_domain``/``dest_cluster``, then waits for the disk to
    reach 'ok'. The disk/template names are derived from the image name
    with spaces replaced and the given suffixes appended.
    """
    glance_provider = api.storagedomains.get(SD_GLANCE_NAME)
    target_image = glance_provider.images.get(name=image_name)
    # Derived object names must not contain spaces.
    disk_name = image_name.replace(" ", "_") + image_ext
    template_name = image_name.replace(" ", "_") + template_ext
    import_action = params.Action(
        storage_domain=params.StorageDomain(
            name=dest_storage_domain,
        ),
        cluster=params.Cluster(
            name=dest_cluster,
        ),
        import_as_template=as_template,
        disk=params.Disk(
            name=disk_name,
        ),
        template=params.Template(
            name=template_name,
        ),
    )
    nt.assert_true(
        target_image.import_image(import_action)
    )
    testlib.assert_true_within_long(
        lambda: api.disks.get(disk_name).status.state == 'ok',
    )
def _add_storage_domain(api, p):
    """Add storage domain ``p``, attach it to DC_NAME and wait until ACTIVE,
    verifying the expected audit-log events along the way."""
    system_service = api.system_service()
    sds_service = system_service.storage_domains_service()
    with test_utils.TestEvent(system_service, 956):
        # USER_ADD_STORAGE_DOMAIN(956)
        sd = sds_service.add(p)
        sd_service = sds_service.storage_domain_service(sd.id)
        testlib.assert_true_within_long(
            lambda: sd_service.get().status == sdk4.types.StorageDomainStatus.UNATTACHED
        )
    dc_service = test_utils.data_center_service(system_service, DC_NAME)
    attached_sds_service = dc_service.storage_domains_service()
    with test_utils.TestEvent(system_service, [966, 962]):
        # USER_ACTIVATED_STORAGE_DOMAIN(966)
        # USER_ATTACH_STORAGE_DOMAIN_TO_POOL(962)
        attached_sds_service.add(
            sdk4.types.StorageDomain(
                id=sd.id,
            ),
        )
        attached_sd_service = attached_sds_service.storage_domain_service(sd.id)
        testlib.assert_true_within_long(
            lambda: attached_sd_service.get().status == sdk4.types.StorageDomainStatus.ACTIVE
        )
def _add_storage_domain_4(api, p):
    """Add storage domain ``p`` (v4 SDK), wait for it to show up UNATTACHED,
    then attach it to DC_NAME and wait for ACTIVE."""
    system = api.system_service()
    domains = system.storage_domains_service()
    sd_id = domains.add(p).id
    domain = domains.storage_domain_service(sd_id)

    def _unattached():
        return domain.get().status == sdk4.types.StorageDomainStatus.UNATTACHED

    testlib.assert_true_within_long(_unattached)
    datacenters = system.data_centers_service()
    dc_id = datacenters.list(search='name=%s' % DC_NAME)[0].id
    attached = datacenters.data_center_service(dc_id).storage_domains_service()
    attached.add(sdk4.types.StorageDomain(id=sd_id))
    attached_domain = attached.storage_domain_service(sd_id)

    def _active():
        return attached_domain.get().status == sdk4.types.StorageDomainStatus.ACTIVE

    testlib.assert_true_within_long(_active)
def next_run_unplug_cpu(api):
    """Unplug a CPU socket from VM0 via a next-run configuration.

    Updates the topology to 1 socket with ``next_run=True``, verifies the
    live config still shows 2 sockets while the next-run config shows 1,
    then reboots the VM and checks the new topology took effect.
    """
    engine = api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    new_cpu = vm_service.get().cpu
    new_cpu.topology.sockets = 1
    vm_service.update(
        vm=types.Vm(
            cpu=new_cpu,
        ),
        next_run=True
    )
    # Live config is untouched until the next run...
    nt.assert_true(
        vm_service.get().cpu.topology.sockets == 2
    )
    # ...but the pending config already has the reduced socket count.
    nt.assert_true(
        vm_service.get(next_run=True).cpu.topology.sockets == 1
    )
    with test_utils.TestEvent(engine, 157):
        # USER_REBOOT_VM(157)
        vm_service.reboot()
        testlib.assert_true_within_long(
            lambda: vm_service.get().status == types.VmStatus.UP
        )
    nt.assert_true(
        vm_service.get().cpu.topology.sockets == 1
    )
def add_snapshot_for_backup(api):
    """Snapshot VM2's first attached disk (without memory state) for the
    backup scenario and wait until the snapshot reaches OK."""
    engine = api.system_service()
    vm2_disk_attachments_service = test_utils.get_disk_attachments_service(engine, VM2_NAME)
    disk = vm2_disk_attachments_service.list()[0]
    backup_snapshot_params = types.Snapshot(
        description=SNAPSHOT_FOR_BACKUP_VM,
        # Disk-only snapshot: memory state is not needed for backup.
        persist_memorystate=False,
        disk_attachments=[
            types.DiskAttachment(
                disk=types.Disk(
                    id=disk.id
                )
            )
        ]
    )
    vm2_snapshots_service = test_utils.get_vm_snapshots_service(engine, VM2_NAME)
    # Correlation id lets us wait for exactly the jobs this add() spawned.
    correlation_id = uuid.uuid4()
    with test_utils.TestEvent(engine, [45, 68]):
        # USER_CREATE_SNAPSHOT(45) event
        # USER_CREATE_SNAPSHOT_FINISHED_SUCCESS(68) event
        vm2_snapshots_service.add(backup_snapshot_params,
                                  query={'correlation_id': correlation_id})
        testlib.assert_true_within_long(
            lambda: test_utils.all_jobs_finished(engine, correlation_id)
        )
        testlib.assert_true_within_long(
            lambda: vm2_snapshots_service.list()[-1].snapshot_status == types.SnapshotStatus.OK,
        )
def verify_backup_snapshot_removed(api):
    """Wait until VM2 is back to a single (active) snapshot entry."""
    snapshots = test_utils.get_vm_snapshots_service(
        api.system_service(), VM2_NAME)

    def _only_active_snapshot_left():
        return len(snapshots.list()) == 1

    testlib.assert_true_within_long(_only_active_snapshot_left)
def run_vms(prefix):
    """Start VM0, BACKUP_VM and VM2 with cloud-init and wait for the first
    two to come up.

    VM0 gets a static IP on the engine's /24 (x.y.z.199, gateway x.y.z.1).
    The same ``start_params`` object is reused for all three starts; its
    cloud_init host address is mutated to 'VM2' just before starting VM2,
    so statement order matters.

    Fix: removed the unused ``host_names`` local (dead code — the list was
    built but never read).
    """
    engine = prefix.virt_env.engine_vm()
    api = engine.get_api()
    # Derive a static guest IP/gateway from the engine's /24 network.
    vm_ip = '.'.join(engine.ip().split('.')[0:3] + ['199'])
    vm_gw = '.'.join(engine.ip().split('.')[0:3] + ['1'])
    start_params = params.Action(
        use_cloud_init=True,
        vm=params.VM(
            initialization=params.Initialization(
                domain=params.Domain(
                    name='lago.example.com'
                ),
                cloud_init=params.CloudInit(
                    host=params.Host(
                        address='VM0'
                    ),
                    users=params.Users(
                        active=True,
                        user=[params.User(
                            user_name='root',
                            password='******'
                        )]
                    ),
                    network_configuration=params.NetworkConfiguration(
                        nics=params.Nics(
                            nic=[params.NIC(
                                name='eth0',
                                boot_protocol='STATIC',
                                on_boot=True,
                                network=params.Network(
                                    ip=params.IP(
                                        address=vm_ip,
                                        netmask='255.255.255.0',
                                        gateway=vm_gw,
                                    ),
                                ),
                            )]
                        ),
                    ),
                ),
            ),
        ),
    )
    api.vms.get(VM0_NAME).start(start_params)
    api.vms.get(BACKUP_VM_NAME).start(start_params)
    # Reuse the action for VM2, but give it its own hostname first.
    start_params.vm.initialization.cloud_init = params.CloudInit(
        host=params.Host(
            address='VM2'
        ),
    )
    api.vms.get(VM2_NAME).start(start_params)
    testlib.assert_true_within_long(
        lambda:
        api.vms.get(VM0_NAME).status.state == 'up' and
        api.vms.get(BACKUP_VM_NAME).status.state == 'up',
    )
    assert_vm_is_alive(prefix, VM0_NAME)
def _update_cluster_version(api, new_version):
    """Upgrade TEST_CLUSTER's compatibility version and verify VM behaviour.

    Checks that: the down VM (VM1) carries no custom compatibility version;
    the running VM (VM0) is pinned to the old version and flagged with a
    next-run configuration; event 253 is emitted when VM0 stops; and after a
    stop/start cycle the pending configuration is applied and the pin gone.

    Fix: removed the redundant second ``events = engine.events_service()``
    assignment — one handle is enough.
    """
    engine = api.system_service()
    clusters_service = engine.clusters_service()
    cluster = clusters_service.list(search=TEST_CLUSTER)[0]
    cluster_service = clusters_service.cluster_service(cluster.id)
    vms_service = engine.vms_service()
    # Remember the pre-upgrade version so we can check VM0 stays pinned to it.
    old_version = types.Version(
        major=cluster.version.major,
        minor=cluster.version.minor
    )
    cluster_service.update(
        cluster=types.Cluster(
            version=new_version
        )
    )
    updating_version = clusters_service.list(search=TEST_CLUSTER)[0].version
    nt.assert_true(
        updating_version.major == new_version.major and
        updating_version.minor == new_version.minor
    )
    down_vm = vms_service.list(search=VM1_NAME)[0]
    nt.assert_true(down_vm.custom_compatibility_version is None)
    up_vm = vms_service.list(search=VM0_NAME)[0]
    nt.assert_true(
        up_vm.custom_compatibility_version.major == old_version.major and
        up_vm.custom_compatibility_version.minor == old_version.minor
    )
    nt.assert_true(up_vm.next_run_configuration_exists)
    events = engine.events_service()
    last_event = int(events.list(max=2)[0].id)
    vm_service = vms_service.vm_service(up_vm.id)
    vm_service.stop()
    testlib.assert_true_within_short(
        lambda: vms_service.list(search=VM0_NAME)[0].status == types.VmStatus.DOWN
    )
    testlib.assert_true_within_long(
        lambda:
        (next(e for e in events.list(from_=last_event) if e.code == 253)).code == 253,
        allowed_exceptions=[StopIteration]
    )
    vm_service.start()
    testlib.assert_true_within_short(
        lambda: vms_service.list(search=VM0_NAME)[0].status == types.VmStatus.UP
    )
    up_vm = vms_service.list(search=VM0_NAME)[0]
    nt.assert_false(up_vm.next_run_configuration_exists)
    nt.assert_true(up_vm.custom_compatibility_version is None)
def _wait_datacenter_up(api):
    """Block until TEST_DC reports status UP."""
    dcs_service = api.system_service().data_centers_service()
    dc_id = dcs_service.list(search=TEST_DC)[0].id
    dc_service = dcs_service.data_center_service(dc_id)

    def _dc_is_up():
        return dc_service.get().status == types.DataCenterStatus.UP

    testlib.assert_true_within_long(_dc_is_up)
def _wait_for_engine_health(host):
    """Wait until any hosted-engine agent reports engine health 'good'."""
    logging.info("Waiting for engine to start...")

    def _engine_is_good():
        status = _get_he_status(host)
        # Numeric keys are per-host agent entries; skip metadata keys.
        return any(
            entry["engine-status"]["health"] == "good"
            for key, entry in status.items()
            if key.isdigit()
        )

    testlib.assert_true_within_long(_engine_is_good)
    logging.info("Engine is running.")
def test_initialize_engine(prefix):
    """Run engine-setup (with OTOPI debug) using the suite answer file,
    clean up yum leftovers, stop firewalld and wait for ovirt-engine.

    Fix: corrected the misspelled assertion message 'firwalld not stopped'
    to 'firewalld not stopped'.
    """
    engine = prefix.virt_env.engine_vm()
    answer_file_src = os.path.join(
        os.environ.get('SUITE'),
        'engine-answer-file.conf'
    )
    engine.copy_to(
        answer_file_src,
        '/tmp/answer-file',
    )
    result = engine.ssh(
        [
            'OTOPI_DEBUG=1',
            'engine-setup',
            '--config-append=/tmp/answer-file',
            '--accept-defaults',
        ],
    )
    # Dump socket state for debugging before asserting on the setup result.
    engine.ssh(
        [
            'ss',
            '-anp',
        ],
    )
    nt.eq_(
        result.code, 0, 'engine-setup failed. Exit code is %s' % result.code
    )
    # Remove YUM leftovers that are in /dev/shm/* - just takes up memory.
    # Best-effort: the exit code is deliberately not checked.
    result = engine.ssh(
        [
            'rm',
            '-rf',
            '/dev/shm/yum',
            '/dev/shm/yumdb',
            '/dev/shm/*.rpm',
        ]
    )
    #TODO: set iSCSI, NFS, LDAP ports in firewall & re-enable it.
    result = engine.ssh(
        [
            'systemctl',
            'stop',
            'firewalld',
        ],
    )
    nt.eq_(
        result.code, 0, 'firewalld not stopped. Exit code is %s' % result.code
    )
    testlib.assert_true_within_long(
        lambda: engine.service('ovirt-engine').alive()
    )
def check_snapshot_with_memory(api):
    """Wait for the memory snapshot preview, then start VM0 and verify it
    comes back UP (restored from the previewed memory state)."""
    engine = api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)

    def _preview_ready():
        snapshot = test_utils.get_snapshot(engine, VM0_NAME, SNAPSHOT_DESC_MEM)
        return snapshot.snapshot_status == types.SnapshotStatus.IN_PREVIEW

    testlib.assert_true_within_long(_preview_ready)
    vm_service.start()
    _verify_vm_state(engine, VM0_NAME, types.VmStatus.UP)
def _wait_for_engine_maintenance(host, he_index, value):
    """Wait until HE agent ``he_index`` reports maintenance == ``value`` and
    the LocalMaintenance state has cleared.

    NOTE(review): ``is value`` compares identity, not equality; it only
    works because True/False are singletons. Safe only if the status field
    is a real bool — confirm before changing to ``==``.
    """
    logging.info("Waiting for Engine Maintenance to reset...")
    # Give the agent a moment to publish fresh status before polling.
    time.sleep(2)
    testlib.assert_true_within_long(lambda: _get_he_status(host)
                                    [he_index]["maintenance"] is value)
    testlib.assert_true_within_long(
        lambda: _is_state_maintenance(host, "LocalMaintenance") is False
    )
    logging.info("Engine Maintenance is reset.")
def verify_add_all_hosts(prefix):
    """Wait until every host in DC_NAME is up, then clean yum leftovers on
    real hosts to free memory."""
    api = prefix.virt_env.engine_vm().get_api_v4()
    hosts_service = api.system_service().hosts_service()
    total_hosts = hosts_service.list(search='datacenter={}'.format(DC_NAME))
    testlib.assert_true_within_long(
        lambda: _all_hosts_up(hosts_service, total_hosts)
    )
    if not USE_VDSMFAKE:
        # Fake VDSM hosts have no yum caches worth cleaning.
        for host in prefix.virt_env.host_vms():
            host.ssh(['rm', '-rf', '/dev/shm/yum', '/dev/shm/*.rpm'])
def engine_restart(prefix):
    """Stop and start the ovirt-engine service, waiting for each transition."""
    engine_vm = prefix.virt_env.engine_vm()
    engine_vm.service('ovirt-engine')._request_stop()

    def _engine_is_down():
        return not engine_vm.service('ovirt-engine').alive()

    testlib.assert_true_within_long(_engine_is_down)
    engine_vm.service('ovirt-engine')._request_start()

    def _engine_is_up():
        return engine_vm.service('ovirt-engine').alive()

    testlib.assert_true_within_long(_engine_is_up)
def clear_global_maintenance(prefix):
    """Drop HE global maintenance on the first host and wait for the state
    to clear."""
    logging.info("Waiting For System Stability...")
    time.sleep(wait_value)
    first_host = prefix.virt_env.host_vms()[0]
    testlib.assert_true_within_short(
        lambda: _set_and_test_maintenance_mode(first_host, False)
    )
    # Keep the 'is False' identity test exactly as the helper contract expects.
    testlib.assert_true_within_long(
        lambda: _is_state_maintenance(first_host, "GlobalMaintenance") is False
    )
def preview_snapshot_with_memory(api):
    """Wait for the memory snapshot to finish, stop VM0, then preview the
    snapshot restoring its memory state."""
    engine = api.system_service()
    events = engine.events_service()
    testlib.assert_true_within_long(
        # wait for event 68 == USER_CREATE_SNAPSHOT_FINISHED_SUCCESS
        lambda: any(e.code == 68 for e in events.list(max=6))
    )
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    vm_service.stop()
    _verify_vm_state(engine, VM0_NAME, types.VmStatus.DOWN)
    snapshot = test_utils.get_snapshot(engine, VM0_NAME, SNAPSHOT_DESC_MEM)
    # NOTE: 'async' as a keyword argument is Python-2-only (reserved word
    # since Python 3.7).
    vm_service.preview_snapshot(snapshot=snapshot, async=False,
                                restore_memory=True)
def _wait_for_engine_migration(host, he_index, health, state):
    """Wait until HE agent ``he_index`` reports engine health ``health`` and
    the migration state check for ``state`` clears, then let the system
    settle."""
    logging.info("Waiting for engine to migrate...")
    testlib.assert_true_within_long(lambda: _get_he_status(host)
                                    [he_index]["engine-status"]["health"] == health)
    testlib.assert_true_within_long(
        lambda: _check_migration_state(host, state) is False
    )
    logging.info("Engine has migrated.")
    logging.info("Waiting For System Stability...")
    # Settle time after migration before callers continue.
    time.sleep(wait_value)
def add_vm_template(api):
    """Add VM1 from the Glance-imported CirrOS template (v3 SDK) and wait
    for the VM and its disk to be ready.

    Skips when TEMPLATE_CIRROS is not available.
    """
    #TODO: Fix the exported domain generation.
    #For the time being, add VM from Glance imported template.
    if api.templates.get(name=TEMPLATE_CIRROS) is None:
        raise SkipTest('%s: template %s not available.' % (add_vm_template.__name__, TEMPLATE_CIRROS))
    vm_memory = 512 * MB
    vm_params = params.VM(
        name=VM1_NAME,
        description='CirrOS imported from Glance as Template',
        memory=vm_memory,
        cluster=params.Cluster(
            name=TEST_CLUSTER,
        ),
        template=params.Template(
            name=TEMPLATE_CIRROS,
        ),
        display=params.Display(
            type_='vnc',
        ),
        # Exercise ballooning-off with a guaranteed half-memory floor.
        memory_policy=params.MemoryPolicy(
            guaranteed=vm_memory / 2,
            ballooning=False,
        ),
        os=params.OperatingSystem(
            type_='other_linux',
        ),
        timezone='Etc/GMT',
        type_='server',
        serial_number=params.SerialNumber(
            policy='custom',
            value='12345678',
        ),
        cpu=params.CPU(
            architecture='X86_64',
            topology=params.CpuTopology(
                cores=1,
                threads=2,
                sockets=1,
            ),
        ),
    )
    api.vms.add(vm_params)
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).status.state == 'down',
    )
    disk_name = api.vms.get(VM1_NAME).disks.list()[0].name
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).disks.get(disk_name).status.state == 'ok'
    )
def local_maintenance(prefix, api):
    """Cycle the hosted-engine host through local maintenance and verify the
    HE VM migrates to a different host."""
    logging.info("Waiting For System Stability...")
    time.sleep(wait_value)
    engine = api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM_HE_NAME)
    hosts_service = engine.hosts_service()

    def _current_running_host():
        # Resolve the host currently running the hosted-engine VM.
        host_id = vm_service.get().host.id
        host = hosts_service.list(
            search='id={}'.format(host_id))[0]
        return host

    he_host = _current_running_host()
    host_service = hosts_service.host_service(id=he_host.id)
    prev_host_id = he_host.id
    logging.info("Performing Deactivation...")
    host_service.deactivate()
    # Either the engine flips the host to MAINTENANCE or the HE agent
    # reports local maintenance — accept whichever is observed first.
    testlib.assert_true_within_long(
        lambda: host_service.get().status == types.HostStatus.MAINTENANCE or
        host_service.get(all_content=True).hosted_engine.local_maintenance
    )
    logging.info("Performing Activation...")
    host_service.activate()
    testlib.assert_true_within_long(
        lambda: host_service.get().status == types.HostStatus.UNASSIGNED
    )
    logging.info("Waiting For System Stability...")
    time.sleep(wait_value)
    logging.info("Waiting For Maintenance...")
    testlib.assert_true_within_long(
        lambda: not host_service.get(all_content=True).hosted_engine.local_maintenance
    )
    logging.info("Waiting For Score...")
    # A positive HA score means the host is again eligible to run the HE VM.
    testlib.assert_true_within_long(
        lambda: host_service.get(all_content=True).hosted_engine.score > 0
    )
    logging.info("Validating Migration...")
    he_host = _current_running_host()
    testlib.assert_true_within_short(
        lambda: prev_host_id != he_host.id
    )
def live_storage_migration(api):
    """Move DISK0 to the iSCSI domain while its VM is running, then verify
    the disk location, status, and that the auto-created migration snapshot
    was merged away."""
    engine = api.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK0_NAME)
    # Correlation id lets us wait for exactly the jobs this move spawned.
    correlation_id = uuid.uuid4()
    # NOTE: 'async' keyword argument is Python-2-only (reserved in py3.7+).
    disk_service.move(
        async=False,
        filter=False,
        storage_domain=types.StorageDomain(
            name=SD_ISCSI_NAME
        ),
        query={'correlation_id': correlation_id}
    )
    testlib.assert_true_within_long(lambda: test_utils.all_jobs_finished(engine, correlation_id))
    # Assert that the disk is on the correct storage domain,
    # its status is OK and the snapshot created for the migration
    # has been merged
    testlib.assert_true_within_long(
        lambda: api.follow_link(disk_service.get().storage_domains[0]).name == SD_ISCSI_NAME
    )
    vm0_snapshots_service = test_utils.get_vm_snapshots_service(engine, VM0_NAME)
    # Only the active snapshot should remain after the merge.
    testlib.assert_true_within_long(
        lambda: len(vm0_snapshots_service.list()) == 1
    )
    testlib.assert_true_within_long(
        lambda: disk_service.get().status == types.DiskStatus.OK
    )
def add_ldap_provider(prefix):
    """Configure the 389-ds LDAP AAA provider on the engine.

    Renders the answer file with the 389-ds machine's IP, starts the
    directory server, runs the aaa-ldap setup tool and restarts the engine.
    """
    engine = prefix.virt_env.engine_vm()
    machine_389ds = prefix.virt_env.get_vm(HOSTNAME_389DS)
    answer_file_src = os.path.join(
        os.environ.get('SUITE'),
        'aaa-ldap-answer-file.conf'
    )
    with open(answer_file_src, 'r') as f:
        content = f.read()
        content = content.replace('@389DS_IP@', machine_389ds.ip())
    # NOTE(review): NamedTemporaryFile defaults to binary mode; writing a
    # str here is Python-2-only behaviour — confirm before porting to py3.
    with tempfile.NamedTemporaryFile(delete=False) as temp:
        temp.write(content)
    engine.copy_to(temp.name, '/root/aaa-ldap-answer-file.conf')
    os.unlink(temp.name)
    result = machine_389ds.ssh(
        [
            'systemctl',
            'start',
            'dirsrv@lago',
        ],
    )
    nt.eq_(
        result.code, 0, 'Failed to start LDAP server. Exit code %s' % result.code
    )
    result = engine.ssh(
        [
            'ovirt-engine-extension-aaa-ldap-setup',
            '--config-append=/root/aaa-ldap-answer-file.conf',
            '--log=/var/log/ovirt-engine-extension-aaa-ldap-setup.log',
        ],
    )
    nt.eq_(
        result.code, 0, 'aaa-ldap-setup failed. Exit code is %s' % result.code
    )
    # Restart the engine so the new AAA extension is loaded.
    engine.service('ovirt-engine')._request_stop()
    testlib.assert_true_within_long(
        lambda: not engine.service('ovirt-engine').alive()
    )
    engine.service('ovirt-engine')._request_start()
    testlib.assert_true_within_long(
        lambda: engine.service('ovirt-engine').alive()
    )
def suspend_resume_vm0(prefix):
    """Suspend VM0 and start resuming it.

    Records the timestamp of the last vdsm log line in the module-global
    ``_log_time_before_suspend`` so a later test can scan only newer log
    entries for errors.
    """
    vm_host = _vm_host(prefix, VM0_NAME)
    ret = vm_host.ssh(['tail', '-1', VDSM_LOG])
    nt.assert_equals(ret.code, EX_OK)
    log_items = ret.out.split()
    global _log_time_before_suspend
    _log_time_before_suspend = log_items[0] + ' ' + log_items[1]  # date + time
    assert_vm0_is_alive(prefix)
    api = prefix.virt_env.engine_vm().get_api_v4()
    vm_service = test_utils.get_vm_service(api.system_service(), VM0_NAME)
    vm_service.suspend()
    testlib.assert_true_within_long(
        lambda: vm_service.get().status == types.VmStatus.SUSPENDED)
    # Resume is fire-and-forget here; a later test verifies the VM is UP.
    vm_service.start()
def template_export(api):
    """Export TEMPLATE_CIRROS (v4 SDK) and wait for it to return to OK.

    Fix: the original fetched ``list(search=...)[0]`` unconditionally, so a
    missing template raised IndexError before the ``is None`` guard could
    run (``template_service()`` never returns None, making that SkipTest
    unreachable). Check the search result first instead.
    """
    templates_service = api.system_service().templates_service()
    matching_templates = templates_service.list(search=TEMPLATE_CIRROS)
    if not matching_templates:
        raise SkipTest('{0}: template {1} is missing'.format(
            template_export.__name__,
            TEMPLATE_CIRROS))
    template_cirros = templates_service.template_service(
        matching_templates[0].id)
    storage_domain = api.system_service().storage_domains_service().list(
        search=SD_TEMPLATES_NAME)[0]
    template_cirros.export(storage_domain=types.StorageDomain(
        id=storage_domain.id,
    ),
    )
    template_id = template_cirros.get().id
    template_service = templates_service.template_service(template_id)
    testlib.assert_true_within_long(
        lambda: template_service.get().status == types.TemplateStatus.OK,
    )
def update_template_version(api):
    """Create version 2 of TEMPLATE_CIRROS from the stateless VM1 and wait
    until the VM pool picks up the new memory size."""
    engine = api.system_service()
    vms_service = engine.vms_service()
    stateless_vm = vms_service.list(search=VM1_NAME)[0]
    templates_service = engine.templates_service()
    template = templates_service.list(search=TEMPLATE_CIRROS)[0]
    # Precondition: the new version must actually differ from the base so
    # the pool update below is observable.
    nt.assert_true(stateless_vm.memory != template.memory)
    templates_service.add(
        template=types.Template(name=TEMPLATE_CIRROS,
                                vm=stateless_vm,
                                version=types.TemplateVersion(
                                    base_template=template,
                                    version_number=2)))
    pools_service = engine.vm_pools_service()
    testlib.assert_true_within_long(lambda: pools_service.list(
        search=VMPOOL_NAME)[0].vm.memory == stateless_vm.memory)
def test_initialize_engine(prefix):
    """Run engine-setup offline with the suite answer file and wait for the
    ovirt-engine service.

    Fix: a non-zero engine-setup exit code was silently *returned* (test
    runners ignore return values), so a failed setup was not reported until
    a later, unrelated timeout. Raise at the point of failure instead.
    """
    engine = prefix.virt_env.engine_vm()
    answer_file_src = os.path.join(os.environ.get('SUITE'),
                                   'engine-answer-file.conf')
    engine.copy_to(
        answer_file_src,
        '/tmp/answer-file',
    )
    result = engine.ssh(
        ['engine-setup', '--config-append=/tmp/answer-file', '--offline'],
    )
    if result.code != 0:
        raise RuntimeError(
            'engine-setup failed. Exit code is %s' % result.code)
    testlib.assert_true_within_long(
        lambda: engine.service('ovirt-engine').alive())
def _add_storage_domain_4(api, p):
    """Add storage domain ``p`` (v4 SDK), wait for UNATTACHED, attach it to
    DC_NAME and wait for ACTIVE."""
    engine = api.system_service()
    domains_service = engine.storage_domains_service()
    new_domain = domains_service.add(p)
    new_domain_service = domains_service.storage_domain_service(new_domain.id)
    testlib.assert_true_within_long(
        lambda: new_domain_service.get().status ==
        sdk4.types.StorageDomainStatus.UNATTACHED)
    datacenters_service = engine.data_centers_service()
    datacenter = datacenters_service.list(search='name=%s' % DC_NAME)[0]
    dc_domains_service = datacenters_service.data_center_service(
        datacenter.id).storage_domains_service()
    dc_domains_service.add(sdk4.types.StorageDomain(id=new_domain.id))
    attached_service = dc_domains_service.storage_domain_service(new_domain.id)
    testlib.assert_true_within_long(
        lambda: attached_service.get().status ==
        sdk4.types.StorageDomainStatus.ACTIVE)
def ha_recovery(prefix):
    """Kill VM2's qemu process on its host and verify HA restarts the VM.

    Waits for audit event 9602 after the kill, then for the VM to come back
    UP, and finally stops it to leave a clean state.
    """
    engine = prefix.virt_env.engine_vm().get_api_v4().system_service()
    # Remember the newest event id so we only scan events from the kill on.
    last_event = int(engine.events_service().list(max=2)[0].id)
    vms_service = engine.vms_service()
    vm = vms_service.list(search=VM2_NAME)[0]
    host_name = engine.hosts_service().host_service(vm.host.id).get().name
    vm_host = prefix.virt_env.get_vm(host_name)
    # NOTE(review): assumes pgrep matches exactly one qemu process for vm2
    # and that the trailing newline in pid.out is tolerated by kill.
    pid = vm_host.ssh(['pgrep', '-f', 'qemu.*guest=vm2'])
    vm_host.ssh(['kill', '-KILL', pid.out])
    events = engine.events_service()
    # 9602: HA-related audit event — presumably 'highly available VM failed';
    # TODO confirm against the engine's AuditLogType table.
    testlib.assert_true_within_short(
        lambda:
        (next(e for e in events.list(from_=last_event) if e.code == 9602)).code == 9602,
        allowed_exceptions=[StopIteration])
    vm_service = vms_service.vm_service(vm.id)
    testlib.assert_true_within_long(
        lambda: vm_service.get().status == types.VmStatus.UP)
    vm_service.stop()
def template_export(api):
    """Export TEMPLATE_CIRROS to SD_TEMPLATES_NAME (v3 SDK) and wait for it
    to return to 'ok'."""
    template = api.templates.get(TEMPLATE_CIRROS)
    if template is None:
        raise SkipTest('{0}: template {1} is missing'.format(
            template_export.__name__,
            TEMPLATE_CIRROS
            )
        )
    export_action = params.Action(
        storage_domain=api.storagedomains.get(SD_TEMPLATES_NAME)
    )
    template.export(export_action)

    def _template_is_ok():
        return api.templates.get(TEMPLATE_CIRROS).status.state == 'ok'

    testlib.assert_true_within_long(_template_is_ok)
def test_update_template_version(api_v4):
    """Create version 2 of TEMPLATE_GUEST from the stateless VM1 and wait
    until the VM pool picks up the new memory size."""
    engine = api_v4.system_service()
    source_vm = engine.vms_service().list(
        search='name={}'.format(VM1_NAME))[0]
    templates_service = engine.templates_service()
    base_template = templates_service.list(
        search='name={}'.format(TEMPLATE_GUEST))[0]
    # The new version must differ so the pool update below is observable.
    assert source_vm.memory != base_template.memory
    new_version = types.TemplateVersion(
        base_template=base_template,
        version_number=2)
    templates_service.add(
        template=types.Template(name=TEMPLATE_GUEST,
                                vm=source_vm,
                                version=new_version))
    pool_service = test_utils.get_pool_service(engine, VMPOOL_NAME)
    testlib.assert_true_within_long(
        lambda: pool_service.get().vm.memory == source_vm.memory)
def test_initialize_engine(prefix):
    """Upgrade the engine in place: update setup packages, re-run
    engine-setup with the post-upgrade answer file, yum update, reboot and
    wait for ovirt-engine and DWH to come back."""
    engine = prefix.virt_env.engine_vm()
    answer_file_src = os.path.join(os.environ.get('SUITE'),
                                   'upgrade-engine-answer-file.conf')
    engine.copy_to(
        answer_file_src,
        '/tmp/answer-file-post',
    )
    _execute_on_engine(engine, ['yum', 'clean', 'all'])
    _execute_on_engine(engine, ['yum', '-y', 'update', 'ovirt-*setup*'],
                       error_message="yum update of ovirt-*setup packages")
    _execute_on_engine(engine, [
        'engine-setup',
        '--config-append=/tmp/answer-file-post',
        '--accept-defaults',
        # Memory check would fail on the small lago VMs; disable it.
        '--otopi-environment=OVESETUP_SYSTEM/memCheckEnabled=bool:False'
    ], error_message="engine-setup", run_ss=True)
    # yum update after engine upgrade
    _execute_on_engine(engine, ['yum', 'clean', 'all'])
    _execute_on_engine(engine, ['yum', '-y', 'update'],
                       error_message="yum update")
    # reboot engine and wait for it to start
    _execute_on_engine(engine, ["reboot"])
    _wait_for_engine_command(engine, ["uptime"])
    # Remove YUM leftovers that are in /dev/shm/* - just takes up memory.
    _execute_on_engine(
        engine,
        ['rm', '-rf', '/dev/shm/yum', '/dev/shm/yumdb', '/dev/shm/*.rpm'])
    # TODO: set iSCSI, NFS, LDAP ports in firewall & re-enable it.
    _execute_on_engine(engine, ['systemctl', 'stop', 'firewalld'],
                       error_message="Stopping firewalld")
    testlib.assert_true_within_long(
        lambda: engine.service('ovirt-engine').alive())
    testlib.assert_true_within_short(
        lambda: engine.service('ovirt-engine-dwhd').alive())
def _update_cluster_version(api, new_version):
    """Upgrade TEST_CLUSTER's compatibility version and verify VM behaviour.

    Checks that: the down VM (VM1) has no custom compatibility version; the
    running VM (VM0) is pinned to the old version with a pending next-run
    configuration; event 253 is emitted when VM0 stops; and after a
    stop/start cycle the pending configuration is applied and the pin gone.
    """
    engine = api.system_service()
    clusters_service = engine.clusters_service()
    cluster = clusters_service.list(search=TEST_CLUSTER)[0]
    cluster_service = clusters_service.cluster_service(cluster.id)
    vms_service = engine.vms_service()
    # Remember the pre-upgrade version so we can check VM0 stays pinned to it.
    old_version = types.Version(major=cluster.version.major,
                                minor=cluster.version.minor)
    cluster_service.update(cluster=types.Cluster(version=new_version))
    updating_version = clusters_service.list(search=TEST_CLUSTER)[0].version
    nt.assert_true(updating_version.major == new_version.major and
                   updating_version.minor == new_version.minor)
    down_vm = vms_service.list(search=VM1_NAME)[0]
    nt.assert_true(down_vm.custom_compatibility_version is None)
    up_vm = vms_service.list(search=VM0_NAME)[0]
    nt.assert_true(
        up_vm.custom_compatibility_version.major == old_version.major and
        up_vm.custom_compatibility_version.minor == old_version.minor)
    nt.assert_true(up_vm.next_run_configuration_exists)
    events = engine.events_service()
    last_event = int(events.list(max=2)[0].id)
    vm_service = vms_service.vm_service(up_vm.id)
    vm_service.stop()
    testlib.assert_true_within_short(lambda: vms_service.list(search=VM0_NAME)[
        0].status == types.VmStatus.DOWN)
    # NOTE(review): this reassignment is redundant — 'events' was fetched
    # a few lines above.
    events = engine.events_service()
    testlib.assert_true_within_long(
        lambda:
        (next(e for e in events.list(from_=last_event) if e.code == 253)).code == 253,
        allowed_exceptions=[StopIteration])
    vm_service.start()
    testlib.assert_true_within_short(lambda: vms_service.list(search=VM0_NAME)[
        0].status == types.VmStatus.UP)
    up_vm = vms_service.list(search=VM0_NAME)[0]
    nt.assert_false(up_vm.next_run_configuration_exists)
    nt.assert_true(up_vm.custom_compatibility_version is None)
def restore_vm0_networking(ovirt_prefix):
    """Revive VM0 guest networking after a suspend/resume cycle.

    If ping already works, nothing is done. Otherwise the guest OS is
    rebooted via virsh/ACPI (not a full VM restart) and the function waits
    until ping succeeds and the engine reports the VM as UP.
    """
    # Networking may not work after resume. We need this pseudo-test for the
    # purpose of reviving VM networking by rebooting the VM. We must be
    # careful to reboot just the guest OS, not to restart the whole VM, to keep
    # checking for contingent failures after resume.
    # A better solution might be using a guest OS other than Cirros.
    if _ping(ovirt_prefix, VM0_PING_DEST) == EX_OK:
        return
    host = _vm_host(ovirt_prefix, VM0_NAME)
    uri = 'qemu+tls://%s/system' % host.name()
    # ACPI mode reboots only the guest OS, keeping the VM process intact.
    ret = host.ssh(['virsh', '-c', uri, 'reboot', '--mode', 'acpi', VM0_NAME])
    nt.assert_equals(ret.code, EX_OK)
    testlib.assert_true_within_long(
        lambda: _ping(ovirt_prefix, VM0_PING_DEST) == EX_OK)
    engine = ovirt_prefix.virt_env.engine_vm().get_api_v4().system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    testlib.assert_true_within_long(
        lambda: vm_service.get().status == types.VmStatus.UP)
def local_maintenance(prefix, api):
    """Cycle the hosted-engine host through local maintenance and verify the
    HE VM migrates to a different host."""
    logging.info("Waiting For System Stability...")
    time.sleep(wait_value)
    engine = api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM_HE_NAME)
    hosts_service = engine.hosts_service()

    def _current_running_host():
        # Resolve the host currently running the hosted-engine VM.
        host_id = vm_service.get().host.id
        host = hosts_service.list(search='id={}'.format(host_id))[0]
        return host

    he_host = _current_running_host()
    host_service = hosts_service.host_service(id=he_host.id)
    prev_host_id = he_host.id
    logging.info("Performing Deactivation...")
    host_service.deactivate()
    # Accept either the engine-side MAINTENANCE status or the HE agent's
    # local-maintenance flag, whichever shows up first.
    testlib.assert_true_within_long(
        lambda: host_service.get().status == types.HostStatus.MAINTENANCE or
        host_service.get(all_content=True).hosted_engine.local_maintenance)
    logging.info("Performing Activation...")
    host_service.activate()
    testlib.assert_true_within_long(
        lambda: host_service.get().status == types.HostStatus.UNASSIGNED)
    logging.info("Waiting For System Stability...")
    time.sleep(wait_value)
    logging.info("Waiting For Maintenance...")
    testlib.assert_true_within_long(lambda: not host_service.get(
        all_content=True).hosted_engine.local_maintenance)
    logging.info("Waiting For Score...")
    # A positive HA score means the host can again run the HE VM.
    testlib.assert_true_within_long(
        lambda: host_service.get(all_content=True).hosted_engine.score > 0)
    logging.info("Validating Migration...")
    he_host = _current_running_host()
    testlib.assert_true_within_short(lambda: prev_host_id != he_host.id)
def vm_run(prefix):
    """Start VM0 (v3 SDK) pinned to the alphabetically-first host, with
    cloud-init, and wait until it reports 'up'."""
    engine = prefix.virt_env.engine_vm()
    api = engine.get_api()
    first_host = sorted(h.name() for h in prefix.virt_env.host_vms())[0]
    start_params = params.Action(
        use_cloud_init=True,
        vm=params.VM(
            # Deterministic placement so later tests know where VM0 runs.
            placement_policy=params.VmPlacementPolicy(
                host=params.Host(name=first_host),
            ),
            initialization=params.Initialization(
                domain=params.Domain(name='lago.example.com'),
                cloud_init=params.CloudInit(
                    host=params.Host(address='VM0'),
                ),
            ),
        ),
    )
    api.vms.get(VM0_NAME).start(start_params)

    def _vm0_is_up():
        return api.vms.get(VM0_NAME).status.state == 'up'

    testlib.assert_true_within_long(_vm0_is_up)
def test_initialize_engine(prefix):
    """Run engine-setup (with OTOPI debug) using the suite answer file,
    clean up yum leftovers, stop firewalld and wait for ovirt-engine.

    Fix: corrected the misspelled assertion message 'firwalld not stopped'
    to 'firewalld not stopped'.
    """
    engine = prefix.virt_env.engine_vm()
    answer_file_src = os.path.join(os.environ.get('SUITE'),
                                   'engine-answer-file.conf')
    engine.copy_to(
        answer_file_src,
        '/tmp/answer-file',
    )
    result = engine.ssh([
        'OTOPI_DEBUG=1',
        'engine-setup',
        '--config-append=/tmp/answer-file',
        '--accept-defaults',
    ],
    )
    # Dump socket state for debugging before asserting on the setup result.
    engine.ssh([
        'ss',
        '-anp',
    ],
    )
    nt.eq_(result.code, 0,
           'engine-setup failed. Exit code is %s' % result.code)
    # Remove YUM leftovers that are in /dev/shm/* - just takes up memory.
    # Best-effort: the exit code is deliberately not checked.
    result = engine.ssh([
        'rm',
        '-rf',
        '/dev/shm/yum',
        '/dev/shm/yumdb',
        '/dev/shm/*.rpm',
    ])
    #TODO: set iSCSI, NFS, LDAP ports in firewall & re-enable it.
    result = engine.ssh([
        'systemctl',
        'stop',
        'firewalld',
    ],
    )
    nt.eq_(result.code, 0,
           'firewalld not stopped. Exit code is %s' % result.code)
    testlib.assert_true_within_long(
        lambda: engine.service('ovirt-engine').alive())
def test_initialize_engine(prefix):
    """Run engine-setup with the suite answer file and wait until the
    ovirt-engine service comes up."""
    engine = prefix.virt_env.engine_vm()
    answer_file_src = os.path.join(
        os.environ.get('SUITE'), 'engine-answer-file.conf')
    engine.copy_to(
        answer_file_src,
        '/tmp/answer-file',
    )
    setup_result = engine.ssh(
        ['engine-setup', '--config-append=/tmp/answer-file'],
    )
    nt.eq_(setup_result.code, 0,
           'engine-setup failed. Exit code is %s' % setup_result.code)
    testlib.assert_true_within_long(
        lambda: engine.service('ovirt-engine').alive())
def vm_run(prefix):
    """Start VM0 via the v4 SDK, pinned to the alphabetically-first host,
    with cloud-init enabled, and wait for the UP status."""
    engine = prefix.virt_env.engine_vm()
    api = engine.get_api_v4()
    first_host = sorted(h.name() for h in prefix.virt_env.host_vms())[0]

    vms_service = api.system_service().vms_service()
    vm_id = vms_service.list(search='name=%s' % VM0_NAME)[0].id
    vm_service = vms_service.vm_service(vm_id)

    vm_service.start(
        use_cloud_init=True,
        vm=types.Vm(
            placement_policy=types.VmPlacementPolicy(
                hosts=[types.Host(name=first_host)],
            ),
            initialization=types.Initialization(
                domain='lago.example.com',
                cloud_init=types.CloudInit(
                    host=types.Host(address='VM0'),
                ),
            ),
        ),
    )
    testlib.assert_true_within_long(
        lambda: vms_service.list(
            search='name=%s' % VM0_NAME)[0].status == types.VmStatus.UP,
    )
def _add_storage_domain_3(api, p):
    """Add a storage domain (v3 API) and attach it to DC_NAME, activating
    it if it comes up in maintenance, then wait for 'active'."""
    datacenter = api.datacenters.get(DC_NAME)
    new_sd = api.storagedomains.add(p)
    nt.assert_true(new_sd)

    attach_result = api.datacenters.get(
        DC_NAME,
    ).storagedomains.add(
        api.storagedomains.get(
            new_sd.name,
        ),
    )
    nt.assert_true(attach_result)

    if datacenter.storagedomains.get(new_sd.name).status.state == 'maintenance':
        new_sd.activate()
    testlib.assert_true_within_long(
        lambda: datacenter.storagedomains.get(
            new_sd.name).status.state == 'active'
    )
def test_template_export(api_v4):
    """Export TEMPLATE_GUEST to the templates export storage domain and
    wait for the template to return to OK, verifying the audit events.

    :param api_v4: ovirtsdk4 connection to the engine
    """
    engine = api_v4.system_service()
    template_guest = test_utils.get_template_service(engine, TEMPLATE_GUEST)
    if template_guest is None:
        # BUG FIX: the skip message named 'template_export' (a different
        # function); report this test's own name instead.
        pytest.skip('{0}: template {1} is missing'.format(
            test_template_export.__name__, TEMPLATE_GUEST))

    storage_domain = engine.storage_domains_service().list(
        search='name={}'.format(SD_TEMPLATES_NAME))[0]
    with test_utils.TestEvent(engine, 1164):
        # IMPORTEXPORT_STARTING_EXPORT_TEMPLATE event
        template_guest.export(storage_domain=types.StorageDomain(
            id=storage_domain.id,
        ),
        )
    with test_utils.TestEvent(engine, 1156):
        # IMPORTEXPORT_EXPORT_TEMPLATE event
        testlib.assert_true_within_long(
            lambda: template_guest.get().status == types.TemplateStatus.OK,
        )
def run_vms(prefix):
    """Boot VM0, BACKUP_VM (with a static cloud-init NIC config derived
    from the engine's subnet) and VM2, and wait for the first two to
    reach 'up'."""
    engine = prefix.virt_env.engine_vm()
    api = engine.get_api()
    subnet = engine.ip().split('.')[0:3]
    vm_ip = '.'.join(subnet + ['199'])
    vm_gw = '.'.join(subnet + ['1'])
    host_names = [h.name() for h in prefix.virt_env.host_vms()]

    static_nic = params.NIC(
        name='eth0',
        boot_protocol='STATIC',
        on_boot=True,
        network=params.Network(ip=params.IP(
            address=vm_ip,
            netmask='255.255.255.0',
            gateway=vm_gw,
        )),
    )
    cloud_init = params.CloudInit(
        host=params.Host(address='VM0'),
        users=params.Users(
            active=True,
            user=[params.User(user_name='root', password='******')],
        ),
        network_configuration=params.NetworkConfiguration(
            nics=params.Nics(nic=[static_nic]),
        ),
    )
    action = params.Action(
        use_cloud_init=True,
        vm=params.VM(initialization=params.Initialization(
            domain=params.Domain(name='lago.example.com'),
            cloud_init=cloud_init,
        )),
    )

    api.vms.get(VM0_NAME).start(action)
    api.vms.get(BACKUP_VM_NAME).start(action)

    # VM2 reuses the action but only needs a different cloud-init hostname.
    action.vm.initialization.cloud_init = params.CloudInit(
        host=params.Host(address='VM2'),
    )
    api.vms.get(VM2_NAME).start(action)

    testlib.assert_true_within_long(
        lambda: api.vms.get(VM0_NAME).status.state == 'up'
        and api.vms.get(BACKUP_VM_NAME).status.state == 'up',
    )
def test_remove_backup_vm_and_backup_snapshot(api_v4):
    """Power off and delete BACKUP_VM, then remove VM2's most recent
    snapshot, verifying the corresponding audit events."""
    engine = api_v4.system_service()
    backup_vm_service = test_utils.get_vm_service(engine, BACKUP_VM_NAME)
    snapshots_service = test_utils.get_vm_snapshots_service(engine, VM2_NAME)
    last_snapshot = snapshots_service.list()[-1]

    # power-off backup-vm
    with test_utils.TestEvent(engine, [33, 61]):
        # USER_STOP_VM(33) event
        # VM_DOWN(61) event
        backup_vm_service.stop()
        testlib.assert_true_within_long(
            lambda: backup_vm_service.get().status == types.VmStatus.DOWN)

    # remove backup_vm
    vm_count_before = len(engine.vms_service().list())
    backup_vm_service.remove()
    assert len(engine.vms_service().list()) == vm_count_before - 1

    with test_utils.TestEvent(engine, 342):  # USER_REMOVE_SNAPSHOT event
        # remove vm2 snapshot
        snapshots_service.snapshot_service(last_snapshot.id).remove()
def add_vm_template(api):
    """Create VM1_NAME from the Glance-imported CirrOS template and wait
    for the VM and its disk to settle.

    :param api: ovirtsdk (v3) connection to the engine
    :raises SkipTest: when the CirrOS template is not available
    """
    # TODO: Fix the exported domain generation.
    # For the time being, add VM from Glance imported template.
    if api.templates.get(name=TEMPLATE_CIRROS) is None:
        raise SkipTest('%s: template %s not available.' % (
            add_vm_template.__name__, TEMPLATE_CIRROS))
    vm_memory = 512 * MB
    vm_params = params.VM(
        name=VM1_NAME,
        description='CirrOS imported from Glance as Template',
        memory=vm_memory,
        cluster=params.Cluster(name=TEST_CLUSTER),
        template=params.Template(name=TEMPLATE_CIRROS),
        display=params.Display(type_='vnc'),
        memory_policy=params.MemoryPolicy(
            # BUG FIX: use floor division; plain '/' yields a float under
            # Python 3 and the guaranteed memory must be an integral byte
            # count (identical result on Python 2).
            guaranteed=vm_memory // 2,
            ballooning=False,
        ),
        os=params.OperatingSystem(type_='other_linux'),
        timezone='Etc/GMT',
        type_='server',
        serial_number=params.SerialNumber(
            policy='custom',
            value='12345678',
        ),
        cpu=params.CPU(
            architecture='X86_64',
            topology=params.CpuTopology(
                cores=1,
                threads=2,
                sockets=1,
            ),
        ),
    )
    api.vms.add(vm_params)
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).status.state == 'down',
    )
    disk_name = api.vms.get(VM1_NAME).disks.list()[0].name
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).disks.get(
            disk_name).status.state == 'ok')
def test_hotunplug_disk(api_v4):
    """Hot-unplug DISK0 from VM0 and wait until the engine reports the
    disk OK and the attachment inactive."""
    engine = api_v4.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK0_NAME)
    attachments_service = test_utils.get_disk_attachments_service(
        engine, VM0_NAME)
    attachment_service = attachments_service.attachment_service(
        disk_service.get().id)
    correlation_id = 'test_hotunplug_disk'

    with test_utils.TestEvent(engine, 2002):
        # USER_HOTUNPLUG_DISK(2,002)
        update_ok = attachment_service.update(
            types.DiskAttachment(active=False),
            query={'correlation_id': correlation_id})
        assert update_ok

        testlib.assert_true_within_long(
            lambda: test_utils.all_jobs_finished(engine, correlation_id))

        testlib.assert_true_within_short(
            lambda: disk_service.get().status == types.DiskStatus.OK)

        testlib.assert_true_within_short(
            lambda: attachment_service.get().active == False)
def snapshot_cold_merge(api):
    """Create two snapshots of VM1's DISK1, delete the older one, and
    wait for the cold merge to finish (two snapshots left, last one OK)."""
    if api.vms.get(VM1_NAME) is None:
        raise SkipTest('Glance is not available')

    # Create two identical disk-only snapshots, waiting after each one.
    for description in ('dead_snap1', 'dead_snap2'):
        snap_params = params.Snapshot(
            description=description,
            persist_memorystate=False,
            disks=params.Disks(disk=[
                params.Disk(
                    id=api.vms.get(VM1_NAME).disks.get(DISK1_NAME).id,
                ),
            ]),
        )
        api.vms.get(VM1_NAME).snapshots.add(snap_params)
        testlib.assert_true_within_long(
            lambda: api.vms.get(
                VM1_NAME).snapshots.list()[-1].snapshot_status == 'ok')

    # Delete the older snapshot and wait for the merge to complete.
    api.vms.get(VM1_NAME).snapshots.list()[-2].delete()
    testlib.assert_true_within_long(
        lambda: (len(api.vms.get(VM1_NAME).snapshots.list()) == 2)
        and (api.vms.get(
            VM1_NAME).snapshots.list()[-1].snapshot_status == 'ok'),
    )
def _add_storage_domain_4(api, p):
    """Create a storage domain (v4 API), wait for UNATTACHED, attach it
    to DC_NAME, and wait for it to become ACTIVE."""
    system = api.system_service()
    domains_service = system.storage_domains_service()
    new_sd = domains_service.add(p)
    new_sd_service = domains_service.storage_domain_service(new_sd.id)
    testlib.assert_true_within_long(
        lambda: new_sd_service.get().status ==
        sdk4.types.StorageDomainStatus.UNATTACHED
    )

    dc_service = test_utils.data_center_service(system, DC_NAME)
    attached_service = dc_service.storage_domains_service()
    attached_service.add(
        sdk4.types.StorageDomain(
            id=new_sd.id,
        ),
    )
    attached_sd_service = attached_service.storage_domain_service(new_sd.id)
    testlib.assert_true_within_long(
        lambda: attached_sd_service.get().status ==
        sdk4.types.StorageDomainStatus.ACTIVE
    )
def _add_storage_domain(api, p):
    """Create a storage domain, then attach and activate it on DC_NAME,
    verifying the engine audit events along the way."""
    system = api.system_service()
    domains_service = system.storage_domains_service()
    with test_utils.TestEvent(system, 956):
        # USER_ADD_STORAGE_DOMAIN(956)
        new_sd = domains_service.add(p)

        new_sd_service = domains_service.storage_domain_service(new_sd.id)
        testlib.assert_true_within_long(
            lambda: new_sd_service.get().status ==
            sdk4.types.StorageDomainStatus.UNATTACHED)

    dc_service = test_utils.data_center_service(system, DC_NAME)
    attached_service = dc_service.storage_domains_service()
    with test_utils.TestEvent(system, [966, 962]):
        # USER_ACTIVATED_STORAGE_DOMAIN(966)
        # USER_ATTACH_STORAGE_DOMAIN_TO_POOL(962)
        attached_service.add(sdk4.types.StorageDomain(id=new_sd.id))
        attached_sd_service = attached_service.storage_domain_service(
            new_sd.id)
        testlib.assert_true_within_long(
            lambda: attached_sd_service.get().status ==
            sdk4.types.StorageDomainStatus.ACTIVE)
def add_ldap_provider(prefix):
    """Configure an LDAP AAA provider against the 389-ds machine and
    restart ovirt-engine so the extension is loaded.

    :param prefix: lago prefix providing the engine and 389-ds VMs
    """
    engine = prefix.virt_env.engine_vm()
    machine_389ds = prefix.virt_env.get_vm(HOSTNAME_389DS)

    answer_file_src = os.path.join(os.environ.get('SUITE'),
                                   'aaa-ldap-answer-file.conf')
    with open(answer_file_src, 'r') as f:
        content = f.read()
    # Substitute the LDAP server address into the answer-file template.
    content = content.replace('@389DS_IP@', machine_389ds.ip())
    # BUG FIX: open the temp file in text mode; the default 'w+b' makes
    # temp.write(content) raise TypeError on Python 3 since content is str.
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp:
        temp.write(content)
    engine.copy_to(temp.name, '/root/aaa-ldap-answer-file.conf')
    os.unlink(temp.name)

    result = machine_389ds.ssh([
        'systemctl',
        'start',
        'dirsrv@lago',
    ])
    nt.eq_(result.code, 0,
           'Failed to start LDAP server. Exit code %s' % result.code)

    result = engine.ssh([
        'ovirt-engine-extension-aaa-ldap-setup',
        '--config-append=/root/aaa-ldap-answer-file.conf',
        '--log=/var/log/ovirt-engine-extension-aaa-ldap-setup.log',
    ])
    nt.eq_(result.code, 0,
           'aaa-ldap-setup failed. Exit code is %s' % result.code)

    # Restart the engine so the new AAA extension is picked up.
    engine.service('ovirt-engine')._request_stop()
    testlib.assert_true_within_long(
        lambda: not engine.service('ovirt-engine').alive())
    engine.service('ovirt-engine')._request_start()
    testlib.assert_true_within_long(
        lambda: engine.service('ovirt-engine').alive())
def restore_vm0_networking(ovirt_prefix):
    """Revive VM0's guest networking after a resume, if it is broken.

    If VM0 already answers SSH, return immediately. Otherwise reboot only
    the guest OS via virsh on the host running the VM, then wait for ping,
    VM status UP, and a working SSH connection.

    :param ovirt_prefix: lago prefix providing the engine and host VMs
    """
    # Networking may not work after resume. We need this pseudo-test for the
    # purpose of reviving VM networking by rebooting the VM. We must be
    # careful to reboot just the guest OS, not to restart the whole VM, to keep
    # checking for contingent failures after resume.
    # A better solution might be using a guest OS other than Cirros.
    try:
        # Single SSH attempt: if it succeeds, networking is fine.
        if _vm_ssh(ovirt_prefix, VM0_NAME, ['true'], tries=1).code == 0:
            return
    # getattr fallback: older lago versions may not define
    # LagoSSHTimeoutException; RuntimeError keeps the except clause valid.
    except getattr(ssh, 'LagoSSHTimeoutException', RuntimeError):
        # May happen on timeout, e.g. when networking is not working at all.
        pass
    host = _vm_host(ovirt_prefix, VM0_NAME)
    uri = 'qemu+tls://%s/system' % host.name()
    # ACPI reboot restarts only the guest OS, leaving the qemu VM running.
    ret = host.ssh(['virsh', '-c', uri, 'reboot', '--mode', 'acpi', VM0_NAME])
    nt.assert_equals(ret.code, EX_OK)
    testlib.assert_true_within_long(
        lambda: _ping(ovirt_prefix, VM0_PING_DEST) == EX_OK)
    engine = ovirt_prefix.virt_env.engine_vm().get_api_v4().system_service()
    _verify_vm_state(engine, VM0_NAME, types.VmStatus.UP)
    nt.assert_equals(_vm_ssh(ovirt_prefix, VM0_NAME, ['true']).code, 0)
def generic_import_from_glance(api, image_name=CIRROS_IMAGE_NAME,
                               as_template=False,
                               image_ext='_glance_disk',
                               template_ext='_glance_template',
                               dest_storage_domain=MASTER_SD_TYPE,
                               dest_cluster=CLUSTER_NAME):
    """Import an image from the Glance provider as a disk (and optionally
    a template) and wait for the resulting disk to become OK."""
    glance = api.storagedomains.get(SD_GLANCE_NAME)
    image = glance.images.get(name=image_name)

    base_name = image_name.replace(" ", "_")
    disk_name = base_name + image_ext
    template_name = base_name + template_ext

    import_action = params.Action(
        storage_domain=params.StorageDomain(name=dest_storage_domain),
        cluster=params.Cluster(name=dest_cluster),
        import_as_template=as_template,
        disk=params.Disk(name=disk_name),
        template=params.Template(name=template_name),
    )
    nt.assert_true(image.import_image(import_action))

    testlib.assert_true_within_long(
        lambda: api.disks.get(disk_name).status.state == 'ok',
    )
def add_disk(api):
    """Attach a new bootable COW disk (DISK0_NAME) to VM0 on the NFS
    domain and wait until it is active and OK."""
    engine = api.system_service()
    vm0_service = test_utils.get_vm_service(engine, VM0_NAME)
    attachments_service = test_utils.get_disk_attachments_service(
        engine, VM0_NAME)

    new_disk = types.Disk(
        name=DISK0_NAME,
        format=types.DiskFormat.COW,
        initial_size=10 * GB,
        provisioned_size=1,
        sparse=True,
        storage_domains=[
            types.StorageDomain(
                name=SD_NFS_NAME,
            ),
        ],
    )
    attachments_service.add(
        types.DiskAttachment(
            disk=new_disk,
            interface=types.DiskInterface.VIRTIO,
            active=True,
            bootable=True,
        ),
    )

    disk_service = test_utils.get_disk_service(engine, DISK0_NAME)
    attachment_service = attachments_service.attachment_service(
        disk_service.get().id)
    testlib.assert_true_within_long(
        lambda: attachment_service.get().active == True
    )
    testlib.assert_true_within_long(
        lambda: disk_service.get().status == types.DiskStatus.OK
    )
def live_storage_migration(api):
    """Live-migrate DISK0 to the iSCSI storage domain and verify the
    result: disk on SD_ISCSI_NAME, status OK, migration snapshot merged.

    :param api: ovirtsdk4 connection to the engine
    """
    engine = api.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK0_NAME)
    # BUG FIX: stringify the UUID -- the correlation id travels as a query
    # string and is compared against the engine's (string) job correlation
    # ids in test_utils.all_jobs_finished.
    correlation_id = str(uuid.uuid4())
    # BUG FIX: 'async' is a reserved word from Python 3.7 on, so
    # move(async=False, ...) no longer parses; pass the same keyword via
    # **kwargs to stay compatible with this SDK version on any Python.
    disk_service.move(
        filter=False,
        storage_domain=types.StorageDomain(name=SD_ISCSI_NAME),
        query={'correlation_id': correlation_id},
        **{'async': False}
    )

    testlib.assert_true_within_long(
        lambda: test_utils.all_jobs_finished(engine, correlation_id))

    # Assert that the disk is on the correct storage domain,
    # its status is OK and the snapshot created for the migration
    # has been merged
    testlib.assert_true_within_long(
        lambda: api.follow_link(
            disk_service.get().storage_domains[0]).name == SD_ISCSI_NAME)

    vm0_snapshots_service = test_utils.get_vm_snapshots_service(
        engine, VM0_NAME)
    testlib.assert_true_within_long(
        lambda: len(vm0_snapshots_service.list()) == 1)

    testlib.assert_true_within_long(
        lambda: disk_service.get().status == types.DiskStatus.OK)