def test_hotplug_disk(prefix):
    """Hot-plug a new sparse COW disk into VM0 and verify it becomes active.

    Creates DISK0 on the NFS storage domain, attaches it live to VM0, then
    waits for the attachment to be active and the disk status to be OK,
    and finally checks VM0 is still responsive.
    """
    api_v4 = prefix.virt_env.engine_vm().get_api_v4()
    engine = api_v4.system_service()
    disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM0_NAME)
    disk_attachment = disk_attachments_service.add(
        types.DiskAttachment(
            disk=types.Disk(
                name=DISK0_NAME,
                provisioned_size=2 * GB,
                format=types.DiskFormat.COW,
                storage_domains=[
                    types.StorageDomain(
                        name=SD_NFS_NAME,
                    ),
                ],
                status=None,
                sparse=True,
            ),
            interface=types.DiskInterface.VIRTIO,
            bootable=False,
            active=True,
        )
    )
    disks_service = engine.disks_service()
    disk_service = disks_service.disk_service(disk_attachment.disk.id)
    attachment_service = disk_attachments_service.attachment_service(
        disk_attachment.id)
    # Idiom fix: rely on truthiness rather than comparing with `== True`.
    testlib.assert_true_within_short(
        lambda: attachment_service.get().active)
    testlib.assert_true_within_short(
        lambda: disk_service.get().status == types.DiskStatus.OK)
    assert_vm0_is_alive(prefix)
def hotplug_disk(api):
    """Hot-plug DISK0 (9 GiB sparse COW on the 'nfs' domain) into VM0."""
    new_disk = params.Disk(
        name=DISK0_NAME,
        size=9 * GB,
        provisioned_size=2,
        interface='virtio',
        format='cow',
        storage_domains=params.StorageDomains(
            storage_domain=[params.StorageDomain(name='nfs')],
        ),
        status=None,
        sparse=True,
        bootable=False,
        active=True,
    )
    api.vms.get(VM0_NAME).disks.add(new_disk)

    def _disk():
        # Re-fetch the disk entity on every poll.
        return api.vms.get(VM0_NAME).disks.get(DISK0_NAME)

    testlib.assert_true_within_short(lambda: _disk().status.state == 'ok')
    nt.assert_true(_disk().active)
def migrate_vm(prefix, api):
    """Migrate VM0 off its current host and verify it lands on the target."""
    engine = api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    hosts_service = engine.hosts_service()

    def _running_host_name():
        # Resolve the name of the host currently running VM0.
        vm_host_id = vm_service.get().host.id
        return hosts_service.list(search='id={}'.format(vm_host_id))[0].name

    src_host = _running_host_name()
    other_hosts = [
        h.name() for h in prefix.virt_env.host_vms() if h.name() != src_host
    ]
    dst_host = min(other_hosts)

    # migrate() currently only returns None, but checks for errors internally
    vm_service.migrate(host=Host(name=dst_host))

    testlib.assert_true_within_short(
        lambda: vm_service.get().status == VmStatus.UP)
    nt.assert_equals(_running_host_name(), dst_host)
def add_glance_4(api):
    # Register a Glance image provider (v4 SDK) and return its provider
    # service, or None if the registration could not be verified.
    target_server = sdk4.types.OpenStackImageProvider(
        name=SD_GLANCE_NAME,
        description=SD_GLANCE_NAME,
        url=GLANCE_SERVER_URL,
        requires_authentication=False
    )
    try:
        providers_service = api.system_service().openstack_image_providers_service()
        providers_service.add(target_server)
        glance = []

        def get():
            # Succeeds once the provider appears in the list and its
            # per-provider service resolves; the service is stashed in the
            # enclosing `glance` list so the outer scope can return it.
            providers = [
                provider for provider in providers_service.list()
                if provider.name == SD_GLANCE_NAME
            ]
            if not providers:
                return False
            instance = providers_service.provider_service(providers.pop().id)
            if instance:
                glance.append(instance)
                return True
            else:
                return False

        testlib.assert_true_within_short(
            func=get, allowed_exceptions=[errors.RequestError])
    except (AssertionError, errors.RequestError):
        # RequestError if add method was failed.
        # AssertionError if add method succeed but we couldn't verify that
        # glance was actually added.
        return None
    return glance.pop()
def test_initialize_engine(prefix):
    """Run engine-setup with the suite answer file and verify services come up."""
    engine = prefix.virt_env.engine_vm()
    answer_file_src = os.path.join(
        os.environ.get('SUITE'), 'engine-answer-file.conf')
    engine.copy_to(answer_file_src, '/tmp/answer-file')

    setup_result = engine.ssh(
        ['engine-setup', '--config-append=/tmp/answer-file'])
    nt.eq_(setup_result.code, 0,
           'engine-setup failed. Exit code is %s' % setup_result.code)

    testlib.assert_true_within_long(
        lambda: engine.service('ovirt-engine').alive())
    testlib.assert_true_within_short(
        lambda: engine.service('ovirt-engine-dwhd').alive())

    # TODO: set iSCSI, NFS, LDAP ports in firewall & re-enable it.
    stop_result = engine.ssh(['systemctl', 'stop', 'firewalld'])
    nt.eq_(stop_result.code, 0,
           'firwalld not stopped. Exit code is %s' % stop_result.code)
def add_vm_blank(api):
    """Create highly-available VM0 from the blank template; wait for 'down'."""
    vm_memory = 512 * MB
    vm_params = params.VM(
        name=VM0_NAME,
        memory=vm_memory,
        os=params.OperatingSystem(
            type_='other_linux',
        ),
        type_='server',
        high_availability=params.HighAvailability(
            enabled=True,
        ),
        cluster=params.Cluster(
            name=TEST_CLUSTER,
        ),
        template=params.Template(
            name=TEMPLATE_BLANK,
        ),
        display=params.Display(
            smartcard_enabled=True,
            keyboard_layout='en-us',
            file_transfer_enabled=True,
            copy_paste_enabled=True,
        ),
        memory_policy=params.MemoryPolicy(
            # Fix: use integer division — `/` yields a float under Python 3,
            # but guaranteed memory must be an integral byte count (the v4
            # blocks in this file already use `//`).
            guaranteed=vm_memory // 2,
        ),
    )
    api.vms.add(vm_params)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'down',
    )
def hotplug_disk(api):
    """Hot-plug DISK0 into VM0 and wait until it is active with status OK."""
    engine = api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    attachments = vm_service.disk_attachments_service()
    attachment = attachments.add(
        types.DiskAttachment(
            disk=types.Disk(
                name=DISK0_NAME,
                provisioned_size=2 * GB,
                format=types.DiskFormat.COW,
                storage_domains=[types.StorageDomain(name=SD_NFS_NAME)],
                status=None,
                sparse=True,
            ),
            interface=types.DiskInterface.VIRTIO,
            bootable=False,
            active=True,
        )
    )
    disk_service = engine.disks_service().disk_service(attachment.disk.id)
    attachment_service = attachments.attachment_service(attachment.id)
    testlib.assert_true_within_short(
        lambda: (attachment_service.get().active
                 and disk_service.get().status == types.DiskStatus.OK))
def add_disk(api):
    """Attach the Glance-imported disk to VM0 as its bootable virtio disk."""
    engine = api.system_service()
    vm0_service = test_utils.get_vm_service(engine, VM0_NAME)
    glance_disk = test_utils.get_disk_service(engine, GLANCE_DISK_NAME)
    nt.assert_true(vm0_service and glance_disk)

    attachments = test_utils.get_disk_attachments_service(engine, VM0_NAME)
    attachments.add(
        types.DiskAttachment(
            disk=types.Disk(
                id=glance_disk.get().id,
                storage_domains=[types.StorageDomain(name=SD_NFS_NAME)],
            ),
            interface=types.DiskInterface.VIRTIO,
            active=True,
            bootable=True,
        ),
    )

    disk_service = test_utils.get_disk_service(engine, GLANCE_DISK_NAME)
    testlib.assert_true_within_short(
        lambda: disk_service.get().status == types.DiskStatus.OK)
def hotplug_disk(api):
    """Hot-plug a new sparse COW disk into VM0; verify active and OK."""
    engine = api.system_service()
    disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM0_NAME)
    disk_attachment = disk_attachments_service.add(
        types.DiskAttachment(
            disk=types.Disk(
                name=DISK0_NAME,
                provisioned_size=2 * GB,
                format=types.DiskFormat.COW,
                storage_domains=[
                    types.StorageDomain(
                        name=SD_NFS_NAME,
                    ),
                ],
                status=None,
                sparse=True,
            ),
            interface=types.DiskInterface.VIRTIO,
            bootable=False,
            active=True
        )
    )
    disks_service = engine.disks_service()
    disk_service = disks_service.disk_service(disk_attachment.disk.id)
    attachment_service = disk_attachments_service.attachment_service(
        disk_attachment.id)
    # Idiom fix: truthiness instead of comparing with `== True`.
    testlib.assert_true_within_short(
        lambda: attachment_service.get().active
    )
    testlib.assert_true_within_short(
        lambda: disk_service.get().status == types.DiskStatus.OK
    )
def test_initialize_engine(prefix):
    """Run engine-setup (with OTOPI debug) and verify engine services start."""
    engine = prefix.virt_env.engine_vm()
    answer_file_src = os.path.join(
        os.environ.get('SUITE'), 'engine-answer-file.conf')
    engine.copy_to(answer_file_src, '/tmp/answer-file-pre')

    result = engine.ssh([
        'OTOPI_DEBUG=1',
        'engine-setup',
        '--config-append=/tmp/answer-file-pre',
    ])
    # Dump open sockets for post-mortem debugging; exit code deliberately
    # ignored, as in the original flow.
    engine.ssh(['ss', '-anp'])
    nt.eq_(result.code, 0,
           'engine-setup failed. Exit code is %s' % result.code)

    testlib.assert_true_within_long(
        lambda: engine.service('ovirt-engine').alive())
    testlib.assert_true_within_short(
        lambda: engine.service('ovirt-engine-dwhd').alive())
def add_blank_vms(api):
    """Create VM0, VM2 and the backup VM from the blank template.

    Bug fix: the shared ``vm_params`` object was mutated for VM2 (enabling
    high availability and setting a custom emulated machine) and those
    settings silently leaked into the subsequently-created backup VM.
    The per-VM fields are now reset explicitly on every iteration.
    """
    vm_memory = 256 * MB
    vm_params = params.VM(
        memory=vm_memory,
        os=params.OperatingSystem(type_='other_linux', ),
        type_='server',
        high_availability=params.HighAvailability(enabled=False, ),
        cluster=params.Cluster(name=TEST_CLUSTER, ),
        template=params.Template(name=TEMPLATE_BLANK, ),
        display=params.Display(
            smartcard_enabled=True,
            keyboard_layout='en-us',
            file_transfer_enabled=True,
            copy_paste_enabled=True,
        ),
        # `//`: guaranteed memory must be an integral byte count.
        memory_policy=params.MemoryPolicy(guaranteed=vm_memory // 2, ),
        name=VM0_NAME)
    for vm in [VM0_NAME, VM2_NAME, BACKUP_VM_NAME]:
        vm_params.name = vm
        # Only VM2 is highly available and pinned to a specific machine type.
        is_vm2 = (vm == VM2_NAME)
        vm_params.high_availability.enabled = is_vm2
        vm_params.custom_emulated_machine = (
            'pc-i440fx-rhel7.4.0' if is_vm2 else None)
        api.vms.add(vm_params)
        # The lambda is evaluated inside this iteration, before `vm` is
        # rebound, so late binding is not an issue here.
        testlib.assert_true_within_short(
            lambda: api.vms.get(vm).status.state == 'down',
        )
def add_disk(api):
    """Activate the Glance disk on VM0 and add a fresh COW disk to VM1."""
    glance_disk = api.disks.get(GLANCE_DISK_NAME)
    if glance_disk:
        attach = params.Disk(
            id=glance_disk.get_id(),
            active=True,
            bootable=True,
        )
        nt.assert_true(api.vms.get(VM0_NAME).disks.add(attach))

    disk_params = params.Disk(
        name=DISK1_NAME,
        size=10 * GB,
        provisioned_size=1,
        interface='virtio',
        format='cow',
        storage_domains=params.StorageDomains(
            storage_domain=[params.StorageDomain(name='nfs')],
        ),
        status=None,
        sparse=True,
        active=True,
        bootable=True,
    )
    nt.assert_true(api.vms.get(VM1_NAME).disks.add(disk_params))

    if glance_disk:
        testlib.assert_true_within_short(
            lambda: api.vms.get(VM0_NAME).disks.get(
                GLANCE_DISK_NAME).status.state == 'ok')
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM1_NAME).disks.get(
            DISK1_NAME).status.state == 'ok')
def add_vm_blank(api):
    """Create VM0 (v4 SDK) from the blank template and wait for DOWN."""
    engine = api.system_service()
    vms_service = engine.vms_service()
    vm_memory = 512 * MB
    blank_vm = sdk4.types.Vm(
        name=VM0_NAME,
        memory=vm_memory,
        os=sdk4.types.OperatingSystem(type='rhel_7x64'),
        type=sdk4.types.VmType.SERVER,
        high_availability=sdk4.types.HighAvailability(enabled=False),
        cluster=sdk4.types.Cluster(name=TEST_CLUSTER),
        template=sdk4.types.Template(name=TEMPLATE_BLANK),
        display=sdk4.types.Display(
            smartcard_enabled=True,
            keyboard_layout='en-us',
            file_transfer_enabled=True,
            copy_paste_enabled=True,
            type=sdk4.types.DisplayType.SPICE,
        ),
        usb=sdk4.types.Usb(
            enabled=True,
            type=sdk4.types.UsbType.NATIVE,
        ),
        memory_policy=sdk4.types.MemoryPolicy(
            ballooning=True,
            guaranteed=vm_memory // 2,
        ),
    )
    vms_service.add(blank_vm)
    vm0_vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    testlib.assert_true_within_short(
        lambda: vm0_vm_service.get().status == sdk4.types.VmStatus.DOWN)
def add_vm_blank(api):
    """Create VM0 and then VM2 (highly available) from the blank template."""
    vm_memory = 256 * MB
    vm_params = params.VM(
        memory=vm_memory,
        os=params.OperatingSystem(type_='other_linux', ),
        type_='server',
        high_availability=params.HighAvailability(enabled=False, ),
        cluster=params.Cluster(name=TEST_CLUSTER, ),
        template=params.Template(name=TEMPLATE_BLANK, ),
        display=params.Display(
            smartcard_enabled=True,
            keyboard_layout='en-us',
            file_transfer_enabled=True,
            copy_paste_enabled=True,
        ),
        # Fix: integer division — `/` would pass a float where an integral
        # byte count is expected (the v4 blocks in this file use `//`).
        memory_policy=params.MemoryPolicy(guaranteed=vm_memory // 2, ),
        name=VM0_NAME)
    api.vms.add(vm_params)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'down',
    )
    # Reuse the same params object for VM2, which must be highly available.
    vm_params.name = VM2_NAME
    vm_params.high_availability.enabled = True
    api.vms.add(vm_params)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM2_NAME).status.state == 'down',
    )
def migrate_vm(prefix, api):
    """Migrate VM0 to another host (v3 SDK) and verify the new placement."""
    def current_running_host():
        # Ask the engine which host currently runs VM0.
        vm_host_id = api.vms.get(VM0_NAME).host.id
        return api.hosts.get(id=vm_host_id).name

    src_host = current_running_host()
    candidates = sorted(
        h.name() for h in prefix.virt_env.host_vms() if h.name() != src_host)
    dst_host = candidates[0]

    nt.assert_true(
        api.vms.get(VM0_NAME).migrate(
            params.Action(host=params.Host(name=dst_host))))

    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up')
    nt.assert_equals(current_running_host(), dst_host)
def add_vm_blank(api):
    """Create VM0 from the blank template and wait for it to be down."""
    vm_memory = 512 * MB
    vm_params = params.VM(
        name=VM0_NAME,
        memory=vm_memory,
        cluster=params.Cluster(
            name=TEST_CLUSTER,
        ),
        template=params.Template(
            name=TEMPLATE_BLANK,
        ),
        display=params.Display(
            smartcard_enabled=True,
            keyboard_layout='en-us',
            file_transfer_enabled=True,
            copy_paste_enabled=True,
        ),
        memory_policy=params.MemoryPolicy(
            # Fix: integer division — `/` yields a float under Python 3,
            # but guaranteed memory must be an integral byte count.
            guaranteed=vm_memory // 2,
        ),
    )
    api.vms.add(vm_params)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'down',
    )
def hotplug_disk(api):
    """Hot-plug DISK1 (9 GiB sparse COW on the 'iscsi' domain) into VM0."""
    new_disk = params.Disk(
        name=DISK1_NAME,
        size=9 * GB,
        provisioned_size=2,
        interface='virtio',
        format='cow',
        storage_domains=params.StorageDomains(
            storage_domain=[params.StorageDomain(name='iscsi')],
        ),
        status=None,
        sparse=True,
        bootable=False,
        active=True,
    )
    api.vms.get(VM0_NAME).disks.add(new_disk)

    def _disk():
        # Re-fetch the disk entity on every poll.
        return api.vms.get(VM0_NAME).disks.get(DISK1_NAME)

    testlib.assert_true_within_short(lambda: _disk().status.state == 'ok')
    nt.assert_true(_disk().active)
def local_maintenance(prefix, api):
    """Cycle the hosted-engine host through local maintenance; verify HE migrates.

    Bug fix: the final "Validating Migration" wait previously captured the
    running host once before polling and then asserted on a constant
    expression, so the retry loop could never observe a migration that
    completed after the first check.  The host is now re-queried on every
    poll of the lambda.
    """
    logging.info("Waiting For System Stability...")
    time.sleep(wait_value)

    engine = api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM_HE_NAME)
    hosts_service = engine.hosts_service()

    def _current_running_host():
        # Host object currently running the hosted-engine VM.
        host_id = vm_service.get().host.id
        return hosts_service.list(search='id={}'.format(host_id))[0]

    he_host = _current_running_host()
    host_service = hosts_service.host_service(id=he_host.id)
    prev_host_id = he_host.id

    logging.info("Performing Deactivation...")
    host_service.deactivate()
    testlib.assert_true_within_long(
        lambda: host_service.get().status == types.HostStatus.MAINTENANCE or
        host_service.get(all_content=True).hosted_engine.local_maintenance
    )

    logging.info("Performing Activation...")
    host_service.activate()
    testlib.assert_true_within_long(
        lambda: host_service.get().status == types.HostStatus.UNASSIGNED
    )

    logging.info("Waiting For System Stability...")
    time.sleep(wait_value)

    logging.info("Waiting For Maintenance...")
    testlib.assert_true_within_long(
        lambda: not host_service.get(
            all_content=True).hosted_engine.local_maintenance
    )

    logging.info("Waiting For Score...")
    testlib.assert_true_within_long(
        lambda: host_service.get(all_content=True).hosted_engine.score > 0
    )

    logging.info("Validating Migration...")
    # Re-query the running host on each retry (fix for the stale snapshot).
    testlib.assert_true_within_short(
        lambda: _current_running_host().id != prev_host_id
    )
def _update_cluster_version(api, new_version):
    # Bump the cluster compatibility version and verify the engine's
    # behavior for running vs. down VMs: a down VM picks up the new version
    # immediately, while a running VM keeps a custom compatibility version
    # (the old one) plus a pending next-run configuration until restarted.
    engine = api.system_service()
    clusters_service = engine.clusters_service()
    cluster = clusters_service.list(search=TEST_CLUSTER)[0]
    cluster_service = clusters_service.cluster_service(cluster.id)
    vms_service = engine.vms_service()
    # Remember the version in effect before the update for later checks.
    old_version = types.Version(
        major=cluster.version.major,
        minor=cluster.version.minor
    )
    cluster_service.update(
        cluster=types.Cluster(
            version=new_version
        )
    )
    updating_version = clusters_service.list(search=TEST_CLUSTER)[0].version
    nt.assert_true(
        updating_version.major == new_version.major and
        updating_version.minor == new_version.minor
    )
    # VM1 is down: it must not carry a custom compatibility version.
    down_vm = vms_service.list(search=VM1_NAME)[0]
    nt.assert_true(down_vm.custom_compatibility_version is None)
    # VM0 is running: it keeps the old version until the next restart.
    up_vm = vms_service.list(search=VM0_NAME)[0]
    nt.assert_true(
        up_vm.custom_compatibility_version.major == old_version.major and
        up_vm.custom_compatibility_version.minor == old_version.minor
    )
    nt.assert_true(up_vm.next_run_configuration_exists)
    # Record the latest event id so the scan below only sees newer events.
    events = engine.events_service()
    last_event = int(events.list(max=2)[0].id)
    vm_service = vms_service.vm_service(up_vm.id)
    vm_service.stop()
    testlib.assert_true_within_short(
        lambda:
        vms_service.list(search=VM0_NAME)[0].status == types.VmStatus.DOWN
    )
    events = engine.events_service()
    # Wait for event code 253 (presumably the "VM configuration updated"
    # audit event — TODO confirm against the engine's AuditLogType).
    testlib.assert_true_within_long(
        lambda:
        (next(e for e in events.list(from_=last_event) if e.code == 253)).code == 253,
        allowed_exceptions=[StopIteration]
    )
    vm_service.start()
    testlib.assert_true_within_short(
        lambda:
        vms_service.list(search=VM0_NAME)[0].status == types.VmStatus.UP
    )
    # After the restart the pending configuration is applied.
    up_vm = vms_service.list(search=VM0_NAME)[0]
    nt.assert_false(up_vm.next_run_configuration_exists)
    nt.assert_true(up_vm.custom_compatibility_version is None)
def initialize_engine(prefix):
    """Run engine-setup on the engine VM and bring up its services.

    Fixes:
    - ``os.path.join`` with a single argument was a no-op and crashed with a
      confusing ``TypeError`` when ``ENGINE_ANSWER_FILE`` was unset; the
      variable is now read directly, so a missing variable raises a
      ``KeyError`` naming it.
    - ``NamedTemporaryFile`` defaults to binary mode, so writing ``str``
      fails under Python 3; the file is now opened in text mode.
    """
    engine = prefix.virt_env.engine_vm()
    answer_file_src = os.environ['ENGINE_ANSWER_FILE']
    engine.copy_to(
        answer_file_src,
        '/tmp/answer-file',
    )
    nics = engine.nics()
    nets = prefix.get_nets()
    engine_ip = [
        nic.get('ip') for nic in nics if nets[nic.get('net')].is_management()
    ]
    host_name = socket.getfqdn()
    host_ip = socket.gethostbyname(host_name)
    # Allow SSO logins via the engine IP, the lago host FQDN and its IP.
    with NamedTemporaryFile(mode='w', delete=False) as sso_conf:
        sso_conf.write(
            ('SSO_ALTERNATE_ENGINE_FQDNS='
             '"${{SSO_ALTERNATE_ENGINE_FQDNS}} {0} {1} {2}"\n').format(
                engine_ip.pop(), host_name, host_ip))
    fqdn_conf = '/etc/ovirt-engine/engine.conf.d/99-custom-fqdn.conf'
    engine.copy_to(sso_conf.name, fqdn_conf)
    engine.ssh(['chmod', '644', fqdn_conf])
    result = engine.ssh([
        'engine-setup',
        '--config-append=/tmp/answer-file',
        '--accept-defaults',
    ],
    )
    nt.eq_(result.code, 0,
           'engine-setup failed. Exit code is %s' % result.code)
    result = engine.ssh([
        'systemctl',
        'start',
        'ovirt-engine-notifier',
    ],
    )
    nt.eq_(result.code, 0,
           'engine-ovirt-notifier failed. Exit code is %s' % result.code)
    # Remove YUM leftovers that are in /dev/shm/* - just takes up memory.
    result = engine.ssh([
        'rm',
        '-rf',
        '/dev/shm/yum',
        '/dev/shm/yumdb',
        '/dev/shm/*.rpm',
    ])
    testlib.assert_true_within_long(
        lambda: engine.service('ovirt-engine').alive())
    testlib.assert_true_within_short(
        lambda: engine.service('ovirt-engine-dwhd').alive())
    testlib.assert_true_within_short(
        lambda: engine.service('ovirt-engine-notifier').alive())
def test_sparsify_disk1(api_v4):
    """Sparsify DISK1 and wait for the disk to return to status OK."""
    engine = api_v4.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK1_NAME)
    # 1325 is the USER_SPARSIFY_IMAGE_START audit-log event code.
    with test_utils.TestEvent(engine, 1325):
        disk_service.sparsify()
    testlib.assert_true_within_short(
        lambda: disk_service.get().status == types.DiskStatus.OK)
def test_remove_vm2_lease(api_v4):
    """Drop VM2's HA storage-domain lease and verify it is gone."""
    engine = api_v4.system_service()
    vm2_service = test_utils.get_vm_service(engine, VM2_NAME)
    updated_vm = types.Vm(
        high_availability=types.HighAvailability(enabled=False),
        lease=types.StorageDomainLease(storage_domain=None),
    )
    vm2_service.update(vm=updated_vm)
    testlib.assert_true_within_short(lambda: vm2_service.get().lease is None)
def set_global_maintenance(prefix):
    """Enable hosted-engine global maintenance via the first host."""
    logging.info("Waiting For System Stability...")
    time.sleep(wait_value)
    first_host = prefix.virt_env.host_vms()[0]
    testlib.assert_true_within_short(
        lambda: _set_and_test_maintenance_mode(first_host, True)
    )
def check_snapshot_with_memory(api):
    """Verify the memory snapshot is in preview, then boot VM0."""
    engine = api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)

    def _snapshot_status():
        snap = test_utils.get_snapshot(engine, VM0_NAME, SNAPSHOT_DESC_MEM)
        return snap.snapshot_status

    testlib.assert_true_within_long(
        lambda: _snapshot_status() == types.SnapshotStatus.IN_PREVIEW)
    vm_service.start()
    testlib.assert_true_within_short(
        lambda: vm_service.get().status == types.VmStatus.UP)
def verify_vm_exported(api):
    """Check that VM1 shows up as DOWN on the export storage domain."""
    engine = api.system_service()
    sd_service = test_utils.get_storage_domain_service(
        engine, SD_TEMPLATES_NAME)

    def _exported_vm_status():
        # Re-resolve the VM on the storage domain each poll.
        vm_sd_service = test_utils.get_storage_domain_vm_service_by_name(
            sd_service, VM1_NAME)
        return vm_sd_service.get().status

    testlib.assert_true_within_short(
        lambda: _exported_vm_status() == types.VmStatus.DOWN)
def set_global_maintenance(prefix):
    """Put the hosted-engine cluster into global maintenance mode."""
    logging.info("Waiting For System Stability...")
    time.sleep(wait_value)
    maintenance_host = prefix.virt_env.host_vms()[0]
    testlib.assert_true_within_short(
        lambda: _set_and_test_maintenance_mode(maintenance_host, True))
def vm_migrate(prefix):
    """Migrate VM0 to the third host (sorted by name) and wait for 'up'."""
    api = prefix.virt_env.engine_vm().get_api()
    sorted_hosts = sorted(h.name() for h in prefix.virt_env.host_vms())
    action = params.Action(host=params.Host(name=sorted_hosts[2]))
    api.vms.get(VM0_NAME).migrate(action)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def add_disks(api):
    # Attach the Glance disk to VM0 (bootable, on iSCSI) and create one new
    # COW disk per remaining VM, then wait for all four disks to be OK.
    engine = api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    glance_disk = test_utils.get_disk_service(engine, GLANCE_DISK_NAME)
    nt.assert_true(vm_service and glance_disk)
    vm0_disk_attachments_service = test_utils.get_disk_attachments_service(engine, VM0_NAME)
    vm0_disk_attachments_service.add(
        types.DiskAttachment(
            disk=types.Disk(
                id=glance_disk.get().id,
                storage_domains=[
                    types.StorageDomain(
                        name=SD_ISCSI_NAME,
                    ),
                ],
            ),
            interface=types.DiskInterface.VIRTIO,
            active=True,
            bootable=True,
        ),
    )
    # A single Disk object is reused across iterations; name and storage
    # domain are rewritten per-VM before each attachment below.
    disk_params = types.Disk(
        provisioned_size=1 * GB,
        format=types.DiskFormat.COW,
        status=None,
        sparse=True,
        active=True,
        bootable=True,
    )
    for vm_name, disk_name, sd_name in (
            (VM1_NAME, DISK1_NAME, SD_NFS_NAME),
            (VM2_NAME, DISK2_NAME, SD_SECOND_NFS_NAME),
            (BACKUP_VM_NAME, BACKUP_DISK_NAME, SD_NFS_NAME)):
        disk_params.name = disk_name
        disk_params.storage_domains = [
            types.StorageDomain(
                name=sd_name,
            )
        ]
        disk_attachments_service = test_utils.get_disk_attachments_service(engine, vm_name)
        nt.assert_true(
            disk_attachments_service.add(types.DiskAttachment(
                disk=disk_params,
                interface=types.DiskInterface.VIRTIO))
        )
    # Safe despite the late-bound lambda: the wait runs inside the loop
    # body, before `disk_service` is rebound on the next iteration.
    for disk_name in (GLANCE_DISK_NAME, DISK1_NAME, DISK2_NAME, BACKUP_DISK_NAME):
        disk_service = test_utils.get_disk_service(engine, disk_name)
        testlib.assert_true_within_short(
            lambda: disk_service.get().status == types.DiskStatus.OK
        )
def _remove_iface_from_vm(api, vm_name, iface_name):
    """Deactivate and then remove NIC `iface_name` from VM `vm_name`."""
    nics_service = test_utils.get_nics_service(api.system_service(), vm_name)
    nic = next(nic for nic in nics_service.list() if nic.name == iface_name)
    nic_service = nics_service.nic_service(nic.id)
    nic_service.deactivate()
    # Idiom fix: `not x` instead of comparing with `== False`.
    testlib.assert_true_within_short(
        lambda: not nic_service.get().plugged)
    nic_service.remove()
def add_vm_blank(api):
    """Create VM0 and VM1 from the blank template and wait for each to be DOWN."""
    vms_service = api.system_service().vms_service()
    vm_memory = 256 * MB
    blank_vm = types.Vm(
        name=VM0_NAME,
        memory=vm_memory,
        type=types.VmType.SERVER,
        os=types.OperatingSystem(
            type='other_linux',
            boot=types.Boot(
                devices=[types.BootDevice.HD, types.BootDevice.NETWORK],
            ),
        ),
        high_availability=types.HighAvailability(enabled=False),
        cluster=types.Cluster(name=TEST_CLUSTER),
        template=types.Template(name=TEMPLATE_BLANK),
        display=types.Display(
            smartcard_enabled=True,
            keyboard_layout='en-us',
            file_transfer_enabled=True,
            copy_paste_enabled=True,
        ),
        memory_policy=types.MemoryPolicy(guaranteed=vm_memory // 2),
    )

    def _add_and_wait(vm_spec):
        # Add the VM and block until the engine reports it DOWN.
        added = vms_service.add(vm_spec)
        vm_service = vms_service.vm_service(added.id)
        testlib.assert_true_within_short(
            lambda: vm_service.get().status == types.VmStatus.DOWN)
        return added

    blank_vm = _add_and_wait(blank_vm)

    # Reuse the returned object for the second VM: clear the engine-assigned
    # id and initialization before re-adding under a new name.
    blank_vm.id = None
    blank_vm.name = VM1_NAME
    blank_vm.initialization = None
    _add_and_wait(blank_vm)
def sparsify_disk1(api):
    """Sparsify DISK1 and verify the disk ends up with status OK again."""
    engine = api.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK1_NAME)
    # 1325 is the USER_SPARSIFY_IMAGE_START audit-log event code.
    with test_utils.TestEvent(engine, 1325):
        disk_service.sparsify()
    testlib.assert_true_within_short(
        lambda: disk_service.get().status == types.DiskStatus.OK)
def vm_run(prefix):
    """Start VM0 pinned to the second host (sorted by name); wait for 'up'."""
    api = prefix.virt_env.engine_vm().get_api()
    sorted_hosts = sorted(h.name() for h in prefix.virt_env.host_vms())
    start_action = params.Action(
        vm=params.VM(
            placement_policy=params.VmPlacementPolicy(
                host=params.Host(name=sorted_hosts[1]),
            ),
        ),
    )
    api.vms.get(VM0_NAME).start(start_action)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def verify_vm_import(api):
    """Wait for the imported VM to be DOWN, then remove it and verify."""
    engine = api.system_service()
    vm_service = test_utils.get_vm_service(engine, IMPORTED_VM_NAME)
    testlib.assert_true_within_short(
        lambda: vm_service.get().status == types.VmStatus.DOWN)
    # Remove the imported VM
    num_of_vms = len(engine.vms_service().list())
    vm_service.remove()
    # Improvement: assert_equals reports both values on failure, unlike
    # assert_true on an opaque boolean expression.
    nt.assert_equals(len(engine.vms_service().list()), num_of_vms - 1)
def test_initialize_engine(prefix):
    """Run engine-setup with an SSO alternate-FQDN override; verify services.

    Fix: ``NamedTemporaryFile`` defaults to binary mode, so writing ``str``
    fails under Python 3; the file is now opened in text mode.
    """
    engine = prefix.virt_env.engine_vm()
    answer_file_src = os.path.join(
        os.environ.get('SUITE'), 'engine-answer-file.conf')
    engine.copy_to(
        answer_file_src,
        '/tmp/answer-file',
    )
    nics = engine.nics()
    nets = prefix.get_nets()
    engine_ip = [
        nic.get('ip') for nic in nics if nets[nic.get('net')].is_management()
    ]
    # Let SSO accept the engine's management IP as an alternate FQDN.
    with NamedTemporaryFile(mode='w', delete=False) as sso_conf:
        sso_conf.write(('SSO_ALTERNATE_ENGINE_FQDNS='
                        '"${{SSO_ALTERNATE_ENGINE_FQDNS}} {0}"\n').format(
            engine_ip.pop()))
    fqdn_conf = '/etc/ovirt-engine/engine.conf.d/99-custom-fqdn.conf'
    engine.copy_to(sso_conf.name, fqdn_conf)
    engine.ssh(['chmod', '644', fqdn_conf])
    result = engine.ssh([
        'engine-setup',
        '--config-append=/tmp/answer-file',
        '--accept-defaults',
    ],
    )
    nt.eq_(result.code, 0,
           'engine-setup failed. Exit code is %s' % result.code)
    # Remove YUM leftovers that are in /dev/shm/* - just takes up memory.
    result = engine.ssh([
        'rm',
        '-rf',
        '/dev/shm/yum',
        '/dev/shm/yumdb',
        '/dev/shm/*.rpm',
    ])
    # TODO: set iSCSI, NFS, LDAP ports in firewall & re-enable it.
    result = engine.ssh([
        'systemctl',
        'stop',
        'firewalld',
    ],
    )
    nt.eq_(result.code, 0,
           'firwalld not stopped. Exit code is %s' % result.code)
    testlib.assert_true_within_long(
        lambda: engine.service('ovirt-engine').alive())
    testlib.assert_true_within_short(
        lambda: engine.service('ovirt-engine-dwhd').alive())
def add_disks(api):
    # Attach the Glance disk to VM0 (bootable, on iSCSI) and create one new
    # COW disk per remaining VM, then wait for all four disks to be OK.
    engine = api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    glance_disk = test_utils.get_disk_service(engine, GLANCE_DISK_NAME)
    nt.assert_true(vm_service and glance_disk)
    vm0_disk_attachments_service = (test_utils.get_disk_attachments_service(
        engine, VM0_NAME))
    vm0_disk_attachments_service.add(
        types.DiskAttachment(
            disk=types.Disk(
                id=glance_disk.get().id,
                storage_domains=[
                    types.StorageDomain(name=SD_ISCSI_NAME, ),
                ],
            ),
            interface=types.DiskInterface.VIRTIO,
            active=True,
            bootable=True,
        ),
    )
    # A single Disk object is reused; name and storage domain are rewritten
    # per-VM before each attachment below.
    disk_params = types.Disk(
        provisioned_size=1 * GB,
        format=types.DiskFormat.COW,
        status=None,
        sparse=True,
        active=True,
        bootable=True,
    )
    for vm_name, disk_name, sd_name in ((VM1_NAME, DISK1_NAME, SD_NFS_NAME),
                                        (VM2_NAME, DISK2_NAME,
                                         SD_SECOND_NFS_NAME),
                                        (BACKUP_VM_NAME, BACKUP_DISK_NAME,
                                         SD_NFS_NAME)):
        disk_params.name = disk_name
        disk_params.storage_domains = [types.StorageDomain(name=sd_name, )]
        disk_attachments_service = (test_utils.get_disk_attachments_service(
            engine, vm_name))
        nt.assert_true(
            disk_attachments_service.add(
                types.DiskAttachment(disk=disk_params,
                                     interface=types.DiskInterface.VIRTIO)))
    # Safe despite the late-bound lambda: the wait runs inside the loop
    # body, before `disk_service` is rebound on the next iteration.
    for disk_name in (
        GLANCE_DISK_NAME,
        DISK1_NAME,
        DISK2_NAME,
        BACKUP_DISK_NAME,
    ):
        disk_service = test_utils.get_disk_service(engine, disk_name)
        testlib.assert_true_within_short(
            lambda: disk_service.get().status == types.DiskStatus.OK)
def add_console(api):
    """Add a VNC graphics console to VM0 and expect two consoles in total."""
    vm = api.vms.get(VM0_NAME)
    vnc_console = params.GraphicsConsole(protocol='vnc')
    vm.graphicsconsoles.add(vnc_console)
    testlib.assert_true_within_short(
        lambda: len(api.vms.get(VM0_NAME).graphicsconsoles.list()) == 2)
def clear_global_maintenance(prefix):
    """Leave hosted-engine global maintenance and wait for it to clear."""
    logging.info("Waiting For System Stability...")
    time.sleep(wait_value)
    first_host = prefix.virt_env.host_vms()[0]
    testlib.assert_true_within_short(
        lambda: _set_and_test_maintenance_mode(first_host, False))
    testlib.assert_true_within_long(
        lambda: _is_state_maintenance(first_host, "GlobalMaintenance") is False)
def verify_vm1_exported(api):
    """Ensure VM1 is DOWN both in the engine and on the export domain."""
    engine = api.system_service()
    _verify_vm_state(engine, VM1_NAME, types.VmStatus.DOWN)
    sd_service = test_utils.get_storage_domain_service(
        engine, SD_TEMPLATES_NAME)
    vm_sd_service = test_utils.get_storage_domain_vm_service_by_name(
        sd_service, VM1_NAME)
    testlib.assert_true_within_short(
        lambda: vm_sd_service.get().status == types.VmStatus.DOWN)
def test_verify_add_vm1_from_template(api_v4):
    """VM1 cloned from the template must be DOWN with all its disks OK."""
    engine = api_v4.system_service()
    _verify_vm_state(engine, VM1_NAME, types.VmStatus.DOWN)
    disks_service = engine.disks_service()
    attachments = test_utils.get_disk_attachments_service(engine, VM1_NAME)
    for attachment in attachments.list():
        # The wait runs inside the loop, so the lambda's binding is current.
        disk_service = disks_service.disk_service(attachment.disk.id)
        testlib.assert_true_within_short(
            lambda: disk_service.get().status == types.DiskStatus.OK)
def _remove_iface_from_vm(api, vm_name, iface_name):
    """Deactivate and then remove NIC `iface_name` from VM `vm_name`."""
    nics_service = test_utils.get_nics_service(api.system_service(), vm_name)
    nic = next(nic for nic in nics_service.list() if nic.name == iface_name)
    nic_service = nics_service.nic_service(nic.id)
    nic_service.deactivate()
    # Idiom fix: use `not x` rather than comparing with `== False`.
    testlib.assert_true_within_short(
        lambda: not nic_service.get().plugged
    )
    nic_service.remove()
def verify_add_vm1_from_template(api):
    """Check VM1 (created from the template) is DOWN and its disks are OK."""
    engine = api.system_service()
    _verify_vm_state(engine, VM1_NAME, types.VmStatus.DOWN)
    all_disks = engine.disks_service()
    vm1_attachments = test_utils.get_disk_attachments_service(
        engine, VM1_NAME)
    for attachment in vm1_attachments.list():
        attached_disk = all_disks.disk_service(attachment.disk.id)
        testlib.assert_true_within_short(
            lambda: attached_disk.get().status == types.DiskStatus.OK)
def migrate_vm(prefix, api):
    # Migrate VM0 to the alphabetically-first other host, then verify:
    #  1. VDSM on the source host no longer reports the VM,
    #  2. the VM is UP, and
    #  3. the engine reports it running on the destination host.
    engine = api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    vm_id = vm_service.get().id
    hosts_service = engine.hosts_service()

    def _current_running_host():
        # Name of the host the engine says is running VM0.
        host_id = vm_service.get().host.id
        host = hosts_service.list(
            search='id={}'.format(host_id))[0]
        return host.name

    src_host = _current_running_host()
    dst_host = sorted([h.name() for h in prefix.virt_env.host_vms()
                       if h.name() != src_host])[0]
    print('source host: {}'.format(src_host))
    print('destination host: {}'.format(dst_host))
    # migrate() currently only returns None, but checks for errors internally
    assert_finished_within_long(
        vm_service.migrate,
        engine,
        host=Host(name=dst_host)
    )

    # Verify that VDSM cleaned the vm in the source host
    def vm_is_not_on_host():
        src_host_obj = [
            h for h in prefix.virt_env.host_vms()
            if h.name() == src_host
        ][0]
        ret = src_host_obj.ssh(['vdsm-client', 'Host', 'getVMList'])
        # NOTE(review): a truthy result object is treated as a failed ssh
        # call here — confirm this matches lago's CommandStatus semantics.
        if ret:
            raise RuntimeError('Failed to call vdsm-client in {}, {}'.format(
                src_host, ret.err
            )
            )
        parsed_output = json.loads(ret.out)
        return vm_id not in parsed_output

    testlib.assert_true_within_short(vm_is_not_on_host)
    testlib.assert_true_within_short(
        lambda: vm_service.get().status == VmStatus.UP
    )
    nt.assert_equals(
        _current_running_host(),
        dst_host
    )
def vm_migrate(prefix):
    """Migrate VM0 to the second host (sorted by name) and wait for 'up'."""
    api = prefix.virt_env.engine_vm().get_api()
    sorted_hosts = sorted(h.name() for h in prefix.virt_env.host_vms())
    api.vms.get(VM0_NAME).migrate(
        params.Action(host=params.Host(name=sorted_hosts[1])))
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def clear_global_maintenance(prefix):
    """Disable global maintenance and wait until the host reports it off."""
    logging.info("Waiting For System Stability...")
    time.sleep(wait_value)
    he_host = prefix.virt_env.host_vms()[0]
    testlib.assert_true_within_short(
        lambda: _set_and_test_maintenance_mode(he_host, False))
    testlib.assert_true_within_long(
        lambda: _is_state_maintenance(he_host, "GlobalMaintenance") is False)
def vm_run(prefix):
    """Start VM0 on the first host with cloud-init networking/credentials.

    Bug fix: the static IP address contained a trailing dot
    ('192.168.200.200.'), which is not a valid IPv4 address.
    """
    api = prefix.virt_env.engine_vm().get_api()
    host_names = [h.name() for h in prefix.virt_env.host_vms()]
    start_params = params.Action(
        use_cloud_init=True,
        vm=params.VM(
            placement_policy=params.VmPlacementPolicy(
                host=params.Host(
                    name=sorted(host_names)[0]
                ),
            ),
            initialization=params.Initialization(
                domain=params.Domain(
                    name='lago.example.com'
                ),
                cloud_init=params.CloudInit(
                    host=params.Host(
                        address='VM0'
                    ),
                    users=params.Users(
                        active=True,
                        user=[params.User(
                            user_name='root',
                            password='******'
                        )]
                    ),
                    network_configuration=params.NetworkConfiguration(
                        nics=params.Nics(
                            nic=[params.NIC(
                                name='eth0',
                                boot_protocol='STATIC',
                                on_boot='True',
                                network=params.Network(
                                    ip=params.IP(
                                        # Fixed: was '192.168.200.200.'
                                        address='192.168.200.200',
                                        netmask='255.255.255.0',
                                        gateway='192.168.200.1',
                                    ),
                                ),
                            )]
                        ),
                    ),
                ),
            ),
        ),
    )
    api.vms.get(VM0_NAME).start(start_params)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def _start_he_vm(host):
    """Start the hosted-engine VM via `host` and wait until it reports up."""
    logging.info("Starting VM...")
    ret = host.ssh(["hosted-engine", "--vm-start"])
    nt.assert_equals(ret.code, 0)
    logging.info("Command succeeded")

    def _vm_is_up():
        # Digit-keyed entries of the HE status dict are per-host records.
        status = _get_he_status(host)
        return any(
            entry["engine-status"]["vm"] == "up"
            for key, entry in status.items() if key.isdigit()
        )

    logging.info("Waiting for VM to be UP...")
    testlib.assert_true_within_short(_vm_is_up)
    logging.info("VM is UP.")
def _shutdown_he_vm(host):
    """Shut down the hosted-engine VM via `host` and wait until it is down."""
    logging.info("Shutting down HE VM on host: %s", host.name())
    ret = host.ssh(["hosted-engine", "--vm-shutdown"])
    nt.assert_equals(ret.code, 0)
    logging.info("Command succeeded")

    def _vm_is_down():
        # Digit-keyed entries of the HE status dict are per-host records.
        status = _get_he_status(host)
        return all(
            entry["engine-status"]["vm"] != "up"
            for key, entry in status.items() if key.isdigit()
        )

    logging.info("Waiting for VM to be down...")
    testlib.assert_true_within_short(_vm_is_down)
    logging.info("VM is down.")
def hotunplug_disk(api):
    """Hot-unplug DISK0 from VM0 and wait until the attachment is inactive."""
    engine = api.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK0_NAME)
    disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM0_NAME)
    disk_attachment = disk_attachments_service.attachment_service(
        disk_service.get().id)
    with test_utils.TestEvent(engine, 2002):
        # USER_HOTUNPLUG_DISK(2,002)
        nt.assert_true(
            disk_attachment.update(types.DiskAttachment(active=False))
        )
        # Idiom fix: `not x` instead of comparing with `== False`.
        testlib.assert_true_within_short(
            lambda: not disk_attachment.get().active
        )
def vm_run(prefix):
    """Start VM0 pinned to the alphabetically-first host and wait for 'up'.

    NOTE(review): a function with this same name is defined earlier in the
    file (cloud-init variant); this later definition shadows it at import
    time — confirm which one the test runner is meant to pick up.

    :param prefix: lago prefix object giving access to the engine and hosts.
    """
    api = prefix.virt_env.engine_vm().get_api()
    first_host = sorted(h.name() for h in prefix.virt_env.host_vms())[0]
    start_params = params.Action(
        vm=params.VM(
            placement_policy=params.VmPlacementPolicy(
                host=params.Host(name=first_host),
            ),
        ),
    )
    api.vms.get(VM0_NAME).start(start_params)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def add_disk(api):
    """Attach disks to VM0 and VM1 and wait for them to become 'ok'.

    If the Glance-imported disk exists, attach it to VM0 (active, bootable).
    If VM1 exists, create and attach a new sparse COW disk on the 'nfs'
    storage domain. Finally poll each attached disk until its state is 'ok'.

    :param api: legacy (v3) SDK API object.
    """
    glance_disk = api.disks.get(GLANCE_DISK_NAME)

    if glance_disk:
        attach_ok = api.vms.get(VM0_NAME).disks.add(
            params.Disk(
                id=glance_disk.get_id(),
                active=True,
                bootable=True,
            )
        )
        nt.assert_true(attach_ok)

    new_disk = params.Disk(
        name=DISK1_NAME,
        size=10 * GB,
        provisioned_size=1,
        interface='virtio',
        format='cow',
        storage_domains=params.StorageDomains(
            storage_domain=[
                params.StorageDomain(name='nfs'),
            ],
        ),
        status=None,
        sparse=True,
        active=True,
        bootable=True,
    )
    if api.vms.get(VM1_NAME) is not None:
        nt.assert_true(api.vms.get(VM1_NAME).disks.add(new_disk))

    # Re-fetch inside the lambdas so each poll sees fresh disk state.
    if glance_disk:
        testlib.assert_true_within_short(
            lambda:
            api.vms.get(VM0_NAME).disks.get(
                GLANCE_DISK_NAME).status.state == 'ok'
        )
    if api.vms.get(VM1_NAME) is not None:
        testlib.assert_true_within_short(
            lambda:
            api.vms.get(VM1_NAME).disks.get(
                DISK1_NAME).status.state == 'ok'
        )
def remove_vm2_lease(api):
    """Disable HA and drop the storage-domain lease on VM2, then verify.

    :param api: ovirtsdk4 connection object.
    """
    engine = api.system_service()
    vm2_svc = test_utils.get_vm_service(engine, VM2_NAME)

    updated_vm = types.Vm(
        high_availability=types.HighAvailability(enabled=False),
        # A lease with a None storage domain removes the existing lease.
        lease=types.StorageDomainLease(storage_domain=None),
    )
    vm2_svc.update(vm=updated_vm)

    testlib.assert_true_within_short(
        lambda: vm2_svc.get().lease is None
    )
def add_vm_blank(api):
    """Create VM_NAME from the blank template and wait until it is DOWN.

    The VM is a SERVER-type guest on TEST_CLUSTER, configured as
    'other_linux' with network boot.

    :param api: ovirtsdk4 connection object.
    """
    engine = api.system_service()
    vms_service = engine.vms_service()

    vm_definition = types.Vm(
        name=VM_NAME,
        template=types.Template(name=TEMPLATE_BLANK),
        cluster=types.Cluster(name=TEST_CLUSTER),
        type=types.VmType.SERVER,
        os=types.OperatingSystem(
            type='other_linux',
            boot=types.Boot(devices=[types.BootDevice.NETWORK]),
        ),
    )
    created = vms_service.add(vm_definition)

    vm_service = vms_service.vm_service(created.id)
    testlib.assert_true_within_short(
        lambda: vm_service.get().status == types.VmStatus.DOWN
    )
def add_vm_pool(api):
    """Create a VM pool from the cirros template and wait for its first VM.

    Expects the engine to emit event code 302 during pool creation, then
    polls until the pool's first VM ('<pool>-1') reports DOWN. IndexError is
    tolerated while the VM has not been created yet.

    :param api: ovirtsdk4 connection object.
    """
    engine = api.system_service()
    pools_service = engine.vm_pools_service()

    cluster = engine.clusters_service().list(
        search='name={}'.format(TEST_CLUSTER))[0]
    template = engine.templates_service().list(
        search='name={}'.format(TEMPLATE_CIRROS))[0]

    with test_utils.TestEvent(engine, 302):
        new_pool = types.VmPool(
            name=VMPOOL_NAME,
            cluster=cluster,
            template=template,
            use_latest_template_version=True,
        )
        pools_service.add(pool=new_pool)

    vm_service = test_utils.get_vm_service(engine, VMPOOL_NAME + '-1')
    testlib.assert_true_within_short(
        lambda: vm_service.get().status == types.VmStatus.DOWN,
        allowed_exceptions=[IndexError]
    )
def extend_disk1(api):
    """Extend DISK1 on VM1 to 2 GB and wait for the resize to complete.

    Finds VM1's attachment of DISK1, requests a provisioned size of 2 GB
    (expecting USER_EXTEND_DISK_SIZE_SUCCESS, event 371), then polls until
    the disk is OK and reports the new size.

    :param api: ovirtsdk4 connection object.
    """
    engine = api.system_service()
    attachments_svc = test_utils.get_disk_attachments_service(
        engine, VM1_NAME)

    for attachment in attachments_svc.list():
        if api.follow_link(attachment.disk).name != DISK1_NAME:
            continue
        single_attachment_svc = attachments_svc.attachment_service(
            attachment.id)
        # USER_EXTEND_DISK_SIZE_SUCCESS(371)
        with test_utils.TestEvent(engine, 371):
            single_attachment_svc.update(
                types.DiskAttachment(
                    disk=types.Disk(provisioned_size=2 * GB,)))

    disk_service = test_utils.get_disk_service(engine, DISK1_NAME)
    testlib.assert_true_within_short(
        lambda: disk_service.get().status == types.DiskStatus.OK
    )
    testlib.assert_true_within_short(
        lambda: disk_service.get().provisioned_size == 2 * GB
    )
def import_templates(api):
    """Register unregistered templates from the export domain.

    Currently skipped unconditionally — everything below the SkipTest is
    dead code kept for when exported-domain generation is supported.

    :param api: legacy (v3) SDK API object.
    """
    #TODO: Fix the exported domain generation
    raise SkipTest('Exported domain generation not supported yet')

    export_sd = api.storagedomains.get(SD_TEMPLATES_NAME)
    unregistered = export_sd.templates.list(unregistered=True)

    register_action = params.Action(
        cluster=params.Cluster(name=CLUSTER_NAME),
    )
    for template in unregistered:
        template.register(action=register_action)

    for template in api.templates.list():
        testlib.assert_true_within_short(
            lambda: api.templates.get(template.name).status.state == 'ok',
        )
def template_update(api):
    """Set a comment on the cirros template and verify it was applied.

    Skips the test when the template does not exist.

    :param api: ovirtsdk4 connection object.
    """
    system = api.system_service()
    template_cirros = test_utils.get_template_service(
        system, TEMPLATE_CIRROS)

    if template_cirros is None:
        raise SkipTest('{0}: template {1} is missing'.format(
            template_update.__name__,
            TEMPLATE_CIRROS,
        ))

    new_comment = "comment by ovirt-system-tests"
    template_cirros.update(
        template=types.Template(comment=new_comment)
    )

    testlib.assert_true_within_short(
        lambda: template_cirros.get().status == types.TemplateStatus.OK
    )
    nt.assert_true(template_cirros.get().comment == new_comment)
def check_update_host(api):
    """Run an upgrade check on a random host and wait for its audit events.

    Triggers ``upgrade_check`` and waits first for
    HOST_AVAILABLE_UPDATES_STARTED (884), then for
    HOST_AVAILABLE_UPDATES_FINISHED (885), scanning engine events newer than
    the latest event seen before each wait.

    :param api: ovirtsdk4 connection object.
    """
    engine = api.system_service()
    host = _random_host_from_dc_4(api, DC_NAME)
    host_service = engine.hosts_service().host_service(id=host.id)
    events_service = engine.events_service()

    def _latest_event_id():
        return int(events_service.list(max=2)[0].id)

    def _wait_for_event(code, from_event):
        # StopIteration is tolerated while the event has not appeared yet.
        testlib.assert_true_within_short(
            lambda: next(
                e for e in events_service.list(from_=from_event)
                if e.code == code
            ).code == code,
            allowed_exceptions=[StopIteration]
        )

    last_event = _latest_event_id()
    host_service.upgrade_check()

    # HOST_AVAILABLE_UPDATES_STARTED(884)
    _wait_for_event(884, last_event)

    # HOST_AVAILABLE_UPDATES_FINISHED(885)
    last_event = _latest_event_id()
    _wait_for_event(885, last_event)