def add_disk(api):
    """Attach the Glance-imported disk to VM0 on the NFS storage domain.

    The disk is attached as an active, bootable VIRTIO disk; the function
    then waits until the engine reports the disk status as OK (unlocked).
    """
    engine = api.system_service()
    # Both services must resolve before we try to attach anything.
    vm_svc = test_utils.get_vm_service(engine, VM0_NAME)
    glance_disk_svc = test_utils.get_disk_service(engine, GLANCE_DISK_NAME)
    nt.assert_true(vm_svc and glance_disk_svc)

    attachment = types.DiskAttachment(
        disk=types.Disk(
            id=glance_disk_svc.get().id,
            storage_domains=[types.StorageDomain(name=SD_NFS_NAME)],
        ),
        interface=types.DiskInterface.VIRTIO,
        active=True,
        bootable=True,
    )
    attachments_svc = test_utils.get_disk_attachments_service(engine, VM0_NAME)
    attachments_svc.add(attachment)

    # Wait for the newly attached disk to become unlocked.
    disk_svc = test_utils.get_disk_service(engine, GLANCE_DISK_NAME)
    testlib.assert_true_within_short(
        lambda: disk_svc.get().status == types.DiskStatus.OK)
def add_disks(api):
    """Attach the Glance disk to VM0 (on the iSCSI domain) and create a new
    1 GB sparse COW boot disk for VM1, VM2 and the backup VM, then wait for
    every disk to reach the OK state.
    """
    engine = api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    glance_disk = test_utils.get_disk_service(engine, GLANCE_DISK_NAME)
    nt.assert_true(vm_service and glance_disk)

    # VM0 gets the pre-imported Glance image, attached on the iSCSI domain.
    test_utils.get_disk_attachments_service(engine, VM0_NAME).add(
        types.DiskAttachment(
            disk=types.Disk(
                id=glance_disk.get().id,
                storage_domains=[types.StorageDomain(name=SD_ISCSI_NAME)],
            ),
            interface=types.DiskInterface.VIRTIO,
            active=True,
            bootable=True,
        ),
    )

    # The remaining VMs each get a freshly created disk on their domain.
    targets = (
        (VM1_NAME, DISK1_NAME, SD_NFS_NAME),
        (VM2_NAME, DISK2_NAME, SD_SECOND_NFS_NAME),
        (BACKUP_VM_NAME, BACKUP_DISK_NAME, SD_NFS_NAME),
    )
    for vm_name, disk_name, sd_name in targets:
        new_disk = types.Disk(
            name=disk_name,
            provisioned_size=1 * GB,
            format=types.DiskFormat.COW,
            status=None,
            sparse=True,
            active=True,
            bootable=True,
            storage_domains=[types.StorageDomain(name=sd_name)],
        )
        attachments = test_utils.get_disk_attachments_service(engine, vm_name)
        nt.assert_true(
            attachments.add(
                types.DiskAttachment(
                    disk=new_disk,
                    interface=types.DiskInterface.VIRTIO,
                )
            )
        )

    # All four disks must end up unlocked.
    for disk_name in (GLANCE_DISK_NAME, DISK1_NAME, DISK2_NAME,
                      BACKUP_DISK_NAME):
        disk_service = test_utils.get_disk_service(engine, disk_name)
        testlib.assert_true_within_short(
            lambda: disk_service.get().status == types.DiskStatus.OK)
def add_disks(api):
    # Attach the Glance-imported disk to VM0 on the iSCSI storage domain,
    # then create and attach a new 1 GB sparse COW disk to VM1, VM2 and the
    # backup VM, and finally wait until every disk reports status OK.
    engine = api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    glance_disk = test_utils.get_disk_service(engine, GLANCE_DISK_NAME)
    # Fail fast if either lookup returned nothing.
    nt.assert_true(vm_service and glance_disk)
    vm0_disk_attachments_service = (test_utils.get_disk_attachments_service(
        engine, VM0_NAME))
    vm0_disk_attachments_service.add(
        types.DiskAttachment(
            disk=types.Disk(
                id=glance_disk.get().id,
                storage_domains=[
                    types.StorageDomain(name=SD_ISCSI_NAME, ),
                ],
            ),
            interface=types.DiskInterface.VIRTIO,
            active=True,
            bootable=True,
        ),
    )
    # Template for the three newly created disks; name and storage_domains
    # are filled in per VM below.  NOTE: the same Disk object is mutated and
    # re-used for each add() call.
    disk_params = types.Disk(
        provisioned_size=1 * GB,
        format=types.DiskFormat.COW,
        status=None,
        sparse=True,
        active=True,
        bootable=True,
    )
    for vm_name, disk_name, sd_name in ((VM1_NAME, DISK1_NAME, SD_NFS_NAME),
                                        (VM2_NAME, DISK2_NAME,
                                         SD_SECOND_NFS_NAME),
                                        (BACKUP_VM_NAME, BACKUP_DISK_NAME,
                                         SD_NFS_NAME)):
        disk_params.name = disk_name
        disk_params.storage_domains = [types.StorageDomain(name=sd_name, )]
        disk_attachments_service = (test_utils.get_disk_attachments_service(
            engine, vm_name))
        nt.assert_true(
            disk_attachments_service.add(
                types.DiskAttachment(disk=disk_params,
                                     interface=types.DiskInterface.VIRTIO)))
    # All four disks (the Glance one plus the three new ones) must unlock.
    for disk_name in (
            GLANCE_DISK_NAME,
            DISK1_NAME,
            DISK2_NAME,
            BACKUP_DISK_NAME,
    ):
        disk_service = test_utils.get_disk_service(engine, disk_name)
        testlib.assert_true_within_short(
            lambda: disk_service.get().status == types.DiskStatus.OK)
def test_add_disks(engine_api, cirros_image_glance_disk_name):
    # Attach the cirros Glance disk to VM0 on the iSCSI storage domain and
    # create a new 1 GB incremental-backup-enabled COW disk for VM1, VM2 and
    # the backup VM, then wait until every disk reports status OK.
    engine = engine_api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    glance_disk = test_utils.get_disk_service(
        engine,
        cirros_image_glance_disk_name,
    )
    assert vm_service and glance_disk
    vm0_disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM0_NAME)
    vm0_disk_attachments_service.add(
        types.DiskAttachment(
            disk=types.Disk(
                id=glance_disk.get().id,
                storage_domains=[
                    types.StorageDomain(name=SD_ISCSI_NAME, ),
                ],
            ),
            interface=types.DiskInterface.VIRTIO,
            active=True,
            bootable=True,
        ),
    )
    # Shared template mutated per VM below; backup=INCREMENTAL enables
    # incremental backup on the newly created disks.
    disk_params = types.Disk(
        provisioned_size=1 * GB,
        format=types.DiskFormat.COW,
        status=None,
        sparse=True,
        active=True,
        bootable=True,
        backup=types.DiskBackup.INCREMENTAL,
    )
    for vm_name, disk_name, sd_name in ((VM1_NAME, DISK1_NAME, SD_NFS_NAME),
                                        (VM2_NAME, DISK2_NAME,
                                         SD_SECOND_NFS_NAME),
                                        (BACKUP_VM_NAME, BACKUP_DISK_NAME,
                                         SD_NFS_NAME)):
        disk_params.name = disk_name
        disk_params.storage_domains = [types.StorageDomain(name=sd_name, )]
        disk_attachments_service = test_utils.get_disk_attachments_service(
            engine, vm_name)
        assert disk_attachments_service.add(
            types.DiskAttachment(disk=disk_params,
                                 interface=types.DiskInterface.VIRTIO))
    # Wait for all four disks to unlock.
    for disk_name in (cirros_image_glance_disk_name, DISK1_NAME, DISK2_NAME,
                      BACKUP_DISK_NAME):
        disk_service = test_utils.get_disk_service(engine, disk_name)
        assertions.assert_true_within_short(
            lambda: disk_service.get().status == types.DiskStatus.OK)
def ovf_import(api):
    """Import a VM from the bundled OVF file.

    Reads test-vm.ovf from the suite's files directory, rewrites its
    hard-coded disk id to the actual id of DISK0, uploads the
    configuration as a new VM (OVF_VM_NAME) and checks the VM now exists.
    """
    # Read the OVF file and replace the disk id
    engine = api.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK0_NAME)
    disk_id = disk_service.get().id
    ovf_file = os.path.join(os.environ['SUITE'], 'files', 'test-vm.ovf')
    # Use a context manager so the file handle is closed promptly instead
    # of leaking until garbage collection.
    with open(ovf_file) as f:
        ovf_text = f.read()
    ovf_text = ovf_text.replace(
        "ovf:diskId='52df5324-2230-40d9-9d3d-8cbb2aa33ba6'",
        "ovf:diskId='%s'" % (disk_id,)
    )
    # Upload OVF
    vms_service = engine.vms_service()
    vms_service.add(
        types.Vm(
            name=OVF_VM_NAME,
            cluster=types.Cluster(
                name=TEST_CLUSTER,
            ),
            initialization=types.Initialization(
                configuration=types.Configuration(
                    type=types.ConfigurationType.OVA,
                    data=ovf_text
                )
            )
        )
    )
    # Check the VM exists
    nt.assert_true(test_utils.get_vm_service(engine, OVF_VM_NAME) is not None)
def live_storage_migration(api): engine = api.system_service() disk_service = test_utils.get_disk_service(engine, DISK0_NAME) correlation_id = uuid.uuid4() disk_service.move( async=False, filter=False, storage_domain=types.StorageDomain( name=SD_ISCSI_NAME ), query={'correlation_id': correlation_id} ) testlib.assert_true_within_long(lambda: test_utils.all_jobs_finished(engine, correlation_id)) # Assert that the disk is on the correct storage domain, # its status is OK and the snapshot created for the migration # has been merged testlib.assert_true_within_long( lambda: api.follow_link(disk_service.get().storage_domains[0]).name == SD_ISCSI_NAME ) vm0_snapshots_service = test_utils.get_vm_snapshots_service(engine, VM0_NAME) testlib.assert_true_within_long( lambda: len(vm0_snapshots_service.list()) == 1 ) testlib.assert_true_within_long( lambda: disk_service.get().status == types.DiskStatus.OK )
def test_live_storage_migration(api_v4):
    # Live-migrate DISK0 to the iSCSI storage domain while VM0 runs, then
    # verify placement, snapshot merge and final disk status.
    # Currently disabled: skip() raises before any of the code below runs.
    pytest.skip("TODO: el8 fails all the time")
    engine = api_v4.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK0_NAME)
    # Fixed correlation id lets us poll all_jobs_finished for the move job.
    correlation_id = 'live_storage_migration'
    disk_service.move(async=False,
                      filter=False,
                      storage_domain=types.StorageDomain(name=SD_ISCSI_NAME),
                      query={'correlation_id': correlation_id})
    testlib.assert_true_within_long(
        lambda: test_utils.all_jobs_finished(engine, correlation_id))
    # Assert that the disk is on the correct storage domain,
    # its status is OK and the snapshot created for the migration
    # has been merged
    testlib.assert_true_within_long(lambda: api_v4.follow_link(
        disk_service.get().storage_domains[0]).name == SD_ISCSI_NAME)
    vm0_snapshots_service = test_utils.get_vm_snapshots_service(
        engine, VM0_NAME)
    testlib.assert_true_within_long(
        lambda: len(vm0_snapshots_service.list()) == 1)
    testlib.assert_true_within_long(
        lambda: disk_service.get().status == types.DiskStatus.OK)
def add_disk(api):
    """Create DISK0 (thin COW, on the master storage domain) and attach it
    to VM0 as an active, bootable VIRTIO disk.

    Waits until the attachment reports active and the disk is unlocked.
    """
    engine = api.system_service()
    vm0_service = test_utils.get_vm_service(engine, VM0_NAME)
    attachments_svc = test_utils.get_disk_attachments_service(engine,
                                                              VM0_NAME)
    new_disk = types.Disk(
        name=DISK0_NAME,
        format=types.DiskFormat.COW,
        initial_size=10 * GB,
        provisioned_size=1,
        sparse=True,
        storage_domains=[types.StorageDomain(name=MASTER_SD_NAME)],
    )
    attachments_svc.add(
        types.DiskAttachment(
            disk=new_disk,
            interface=types.DiskInterface.VIRTIO,
            active=True,
            bootable=True,
        ),
    )

    disk0_svc = test_utils.get_disk_service(engine, DISK0_NAME)
    attachment_svc = attachments_svc.attachment_service(disk0_svc.get().id)
    # Both the attachment activation and the disk unlock are asynchronous.
    testlib.assert_true_within_long(
        lambda: attachment_svc.get().active == True)
    testlib.assert_true_within_long(
        lambda: disk0_svc.get().status == types.DiskStatus.OK)
def add_directlun(prefix):
    # Attach a direct (pass-through) iSCSI LUN to VM0 as a VIRTIO-SCSI disk.
    # The LUN is taken from the iSCSI host beyond the ones already consumed
    # by the iSCSI storage domain (hence from_lun=SD_ISCSI_NR_LUNS + 1).
    luns = test_utils.get_luns(
        prefix,
        SD_ISCSI_HOST_NAME, SD_ISCSI_PORT, SD_ISCSI_TARGET,
        from_lun=SD_ISCSI_NR_LUNS + 1)
    # RAW direct-LUN disk definition pointing at the discovered units.
    dlun_params = sdk4.types.Disk(
        name=DLUN_DISK_NAME,
        format=sdk4.types.DiskFormat.RAW,
        lun_storage=sdk4.types.HostStorage(
            type=sdk4.types.StorageType.ISCSI,
            logical_units=luns,
        ),
    )
    api = prefix.virt_env.engine_vm().get_api_v4()
    engine = api.system_service()
    disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM0_NAME)
    # Perform the attach inside the event context (event id 97 —
    # presumably the add-disk engine event; confirm against the engine's
    # AuditLogType codes) and verify the attachment exists afterwards.
    with test_utils.TestEvent(engine, 97):
        disk_attachments_service.add(
            sdk4.types.DiskAttachment(
                disk=dlun_params,
                interface=sdk4.types.DiskInterface.VIRTIO_SCSI))

        disk_service = test_utils.get_disk_service(engine, DLUN_DISK_NAME)
        attachment_service = disk_attachments_service.attachment_service(
            disk_service.get().id)
        nt.assert_not_equal(
            attachment_service.get(),
            None,
            'Failed to attach Direct LUN disk to {}'.format(VM0_NAME))
def test_sparsify_disk1(api_v4):
    """Sparsify DISK1 and wait for it to return to the OK (unlocked) state."""
    engine = api_v4.system_service()
    disk_svc = test_utils.get_disk_service(engine, DISK1_NAME)
    # 1325 == USER_SPARSIFY_IMAGE_START event
    with test_utils.TestEvent(engine, 1325):
        disk_svc.sparsify()

    testlib.assert_true_within_short(
        lambda: disk_svc.get().status == types.DiskStatus.OK)
def sparsify_disk1(api):
    """Trigger sparsification of DISK1 and wait until it is unlocked."""
    engine = api.system_service()
    svc = test_utils.get_disk_service(engine, DISK1_NAME)
    # 1325 == USER_SPARSIFY_IMAGE_START event
    with test_utils.TestEvent(engine, 1325):
        svc.sparsify()

    # Sparsification runs asynchronously; poll until the disk unlocks.
    testlib.assert_true_within_short(
        lambda: svc.get().status == types.DiskStatus.OK)
def test_sparsify_disk1(engine_api):
    # Sparsify DISK1, waiting for both the start (1325) and the successful
    # finish (1326) engine events, then check the disk is unlocked.
    engine = engine_api.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK1_NAME)
    with engine_utils.wait_for_event(engine, 1325):
        # USER_SPARSIFY_IMAGE_START event
        disk_service.sparsify()

    # Nothing to do inside: we only wait for the finish event to appear.
    with engine_utils.wait_for_event(
            engine, 1326):  # USER_SPARSIFY_IMAGE_FINISH_SUCCESS
        pass

    # Make sure disk is unlocked
    assert disk_service.get().status == types.DiskStatus.OK
def hotunplug_disk(api):
    """Deactivate (hot-unplug) DISK0 on VM0 and wait for it to go inactive."""
    engine = api.system_service()
    vm_svc = test_utils.get_vm_service(engine, VM0_NAME)
    disk_svc = test_utils.get_disk_service(engine, DISK0_NAME)
    attachment = vm_svc.disk_attachments_service().attachment_service(
        disk_svc.get().id)
    # The update request must be accepted by the engine.
    nt.assert_true(attachment.update(types.DiskAttachment(active=False)))
    # Deactivation is asynchronous; wait until the attachment reports it.
    testlib.assert_true_within_short(
        lambda: attachment.get().active == False)
def test_add_floating_disk(engine_api, disks_service):
    """Create a small floating COW disk on the second NFS domain and wait
    until the engine reports it OK (unlocked).
    """
    floating_disk = types.Disk(
        name=FLOATING_DISK_NAME,
        format=types.DiskFormat.COW,
        provisioned_size=2 * MB,
        active=True,
        storage_domains=[types.StorageDomain(name=SD_SECOND_NFS_NAME)],
    )
    disks_service.add(floating_disk)

    engine = engine_api.system_service()
    disk_svc = test_utils.get_disk_service(engine, FLOATING_DISK_NAME)
    assertions.assert_true_within_short(
        lambda: disk_svc.get().status == types.DiskStatus.OK)
def cold_storage_migration(api): disk_service = test_utils.get_disk_service(api.system_service(), DISK2_NAME) # Cold migrate the disk to ISCSI storage domain and then migrate it back # to the NFS domain because it is used by other cases that assume the # disk found on that specific domain for domain in [SD_ISCSI_NAME, SD_SECOND_NFS_NAME]: disk_service.move(async=False, storage_domain=types.StorageDomain(name=domain)) testlib.assert_true_within_long( lambda: api.follow_link(disk_service.get().storage_domains[0]).name == domain and (disk_service.get().status == types.DiskStatus.OK))
def test_hotunplug_disk(api_v4):
    """Hot-unplug DISK0 from VM0 and wait for the attachment to deactivate."""
    engine = api_v4.system_service()
    disk_svc = test_utils.get_disk_service(engine, DISK0_NAME)
    attachments = test_utils.get_disk_attachments_service(engine, VM0_NAME)
    attachment = attachments.attachment_service(disk_svc.get().id)

    # USER_HOTUNPLUG_DISK(2,002)
    with test_utils.TestEvent(engine, 2002):
        assert attachment.update(types.DiskAttachment(active=False))

    # Deactivation completes asynchronously.
    testlib.assert_true_within_short(
        lambda: attachment.get().active == False)
def hotunplug_disk(api):
    """Deactivate DISK0's attachment on VM0 (hot-unplug) and wait for it."""
    engine = api.system_service()
    disk_svc = test_utils.get_disk_service(engine, DISK0_NAME)
    attachment = test_utils.get_disk_attachments_service(
        engine, VM0_NAME).attachment_service(disk_svc.get().id)

    # USER_HOTUNPLUG_DISK(2,002)
    with test_utils.TestEvent(engine, 2002):
        nt.assert_true(
            attachment.update(types.DiskAttachment(active=False)))

    testlib.assert_true_within_short(
        lambda: attachment.get().active == False)
def cold_storage_migration(engine_api): engine = engine_api.system_service() disk_service = test_utils.get_disk_service(engine, DISK2_NAME) # Cold migrate the disk to ISCSI storage domain and then migrate it back # to the NFS domain because it is used by other cases that assume the # disk found on that specific domain for domain in [SD_ISCSI_NAME, SD_SECOND_NFS_NAME]: with engine_utils.wait_for_event(engine, 2008): # USER_MOVED_DISK(2,008) disk_service.move(async=False, storage_domain=types.StorageDomain(name=domain)) assertions.assert_true_within_long(lambda: engine_api.follow_link( disk_service.get().storage_domains[0]).name == domain) assertions.assert_true_within_long( lambda: disk_service.get().status == types.DiskStatus.OK)
def add_directlun(prefix):
    # Attach a direct (pass-through) iSCSI LUN to VM0 as a VIRTIO-SCSI disk.
    # LUN GUIDs are read from a file the iSCSI host prepared earlier.

    # Find LUN GUIDs
    iscsi_host = prefix.virt_env.get_vm(SD_ISCSI_HOST_NAME)
    ret = iscsi_host.ssh(['cat', '/root/multipath.txt'])
    nt.assert_equals(ret.code, 0)
    all_guids = ret.out.splitlines()
    # Take the first unused LUN. 0-(SD_ISCSI_NR_LUNS) are used by iSCSI SD
    lun_guid = all_guids[SD_ISCSI_NR_LUNS]

    ips = iscsi_host.all_ips()
    luns = []
    # One LogicalUnit per portal IP so every path to the LUN is described.
    # Credentials here are redacted placeholders in this copy of the code.
    for ip in ips:
        lun = types.LogicalUnit(
            id=lun_guid,
            address=ip,
            port=SD_ISCSI_PORT,
            target=SD_ISCSI_TARGET,
            username='******',
            password='******',
        )
        luns.append(lun)

    # RAW direct-LUN disk definition; sgio left at its default
    # (the UNFILTERED variant is kept commented out deliberately).
    dlun_params = types.Disk(
        name=DLUN_DISK_NAME,
        format=types.DiskFormat.RAW,
        lun_storage=types.HostStorage(
            type=types.StorageType.ISCSI,
            logical_units=luns,
        ),
        # sgio=types.ScsiGenericIO.UNFILTERED,
    )

    api = prefix.virt_env.engine_vm().get_api_v4()
    engine = api.system_service()
    disk_attachments_service = (test_utils.get_disk_attachments_service(
        engine, VM0_NAME))
    disk_attachments_service.add(
        types.DiskAttachment(disk=dlun_params,
                             interface=types.DiskInterface.VIRTIO_SCSI))

    # The attachment must exist once add() returns.
    disk_service = test_utils.get_disk_service(engine, DLUN_DISK_NAME)
    attachment_service = (disk_attachments_service.attachment_service(
        disk_service.get().id))
    nt.assert_not_equal(attachment_service.get(), None,
                        'Direct LUN disk not attached')
def test_extend_disk1(api_v4):
    """Grow DISK1 on VM1 to 2 GB and wait for the resize to finish."""
    engine = api_v4.system_service()
    attachments_svc = test_utils.get_disk_attachments_service(
        engine, VM1_NAME)
    for att in attachments_svc.list():
        # Only DISK1 is resized; skip every other attachment.
        if api_v4.follow_link(att.disk).name != DISK1_NAME:
            continue
        att_svc = attachments_svc.attachment_service(att.id)
        # USER_EXTEND_DISK_SIZE_SUCCESS(371)
        with test_utils.TestEvent(engine, 371):
            att_svc.update(
                types.DiskAttachment(
                    disk=types.Disk(provisioned_size=2 * GB)))

        disk_svc = test_utils.get_disk_service(engine, DISK1_NAME)
        testlib.assert_true_within_short(
            lambda: disk_svc.get().status == types.DiskStatus.OK)
        testlib.assert_true_within_short(
            lambda: disk_svc.get().provisioned_size == 2 * GB)
def extend_disk1(api):
    """Extend DISK1 on VM1 to 2 GB and wait for the operation to finish.

    Finds DISK1 among VM1's disk attachments, requests the new
    provisioned size through the attachment service, then waits until the
    disk is unlocked and reports the new size.
    """
    engine = api.system_service()
    disk_attachments_service = (test_utils.get_disk_attachments_service(
        engine, VM1_NAME))
    for disk_attachment in disk_attachments_service.list():
        disk = api.follow_link(disk_attachment.disk)
        if disk.name == DISK1_NAME:
            attachment_service = (disk_attachments_service.attachment_service(
                disk_attachment.id))
            attachment_service.update(
                types.DiskAttachment(
                    disk=types.Disk(provisioned_size=2 * GB)))

            disk_service = test_utils.get_disk_service(engine, DISK1_NAME)
            # Two separate polls (matching the sibling implementations of
            # this case) give a clearer failure message than one combined
            # condition and avoid a redundant second get() on every poll.
            testlib.assert_true_within_short(
                lambda: disk_service.get().status == types.DiskStatus.OK)
            testlib.assert_true_within_short(
                lambda: disk_service.get().provisioned_size == 2 * GB)
def test_ovf_import(api_v4):
    """Import a VM from the bundled OVF file.

    Rewrites the OVF's hard-coded disk id to DISK0's actual id, uploads
    the configuration as OVF_VM_NAME and asserts the VM now exists.
    """
    # Read the OVF file and replace the disk id
    engine = api_v4.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK0_NAME)
    disk_id = disk_service.get().id
    ovf_file = os.path.join(os.environ['SUITE'], 'files', 'test-vm.ovf')
    # Use a context manager so the file handle is closed promptly instead
    # of leaking until garbage collection.
    with open(ovf_file) as f:
        ovf_text = f.read()
    ovf_text = ovf_text.replace(
        "ovf:diskId='52df5324-2230-40d9-9d3d-8cbb2aa33ba6'",
        "ovf:diskId='%s'" % (disk_id, ))
    # Upload OVF
    vms_service = engine.vms_service()
    vms_service.add(
        types.Vm(name=OVF_VM_NAME,
                 cluster=types.Cluster(name=TEST_CLUSTER, ),
                 initialization=types.Initialization(
                     configuration=types.Configuration(
                         type=types.ConfigurationType.OVA,
                         data=ovf_text))))
    # Check the VM exists
    assert test_utils.get_vm_service(engine, OVF_VM_NAME) is not None
def live_storage_migration(api):
    # Live-migrate DISK0 (attached to running VM0) to the iSCSI storage
    # domain and wait for the whole operation — including the merge of the
    # auto-created live-migration snapshot — to complete.
    engine = api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    disk_service = test_utils.get_disk_service(engine, DISK0_NAME)
    disk_service.move(async=False,
                      filter=False,
                      storage_domain=types.StorageDomain(name=SD_ISCSI_NAME))
    snapshots_service = vm_service.snapshots_service()
    # Assert that the disk is on the correct storage domain,
    # its status is OK and the snapshot created for the migration
    # has been merged
    # NOTE: because of `and` chaining, the lambda yields the disk status
    # once the first two conditions hold (and False otherwise); that value
    # is then compared against DiskStatus.OK by assert_equals_within_long.
    testlib.assert_equals_within_long(
        lambda: api.follow_link(
            disk_service.get().storage_domains[0]).name == SD_ISCSI_NAME and \
        len(snapshots_service.list()) == 1 and \
        disk_service.get().status,
        types.DiskStatus.OK)
    # This sleep is a temporary solution to the race condition
    # https://bugzilla.redhat.com/1456504
    time.sleep(3)
def test_hotunplug_disk(engine_api):
    # Hot-unplug DISK0 from VM0: deactivate the attachment, wait for the
    # unplug job (tracked via correlation id) to finish, then verify both
    # the disk status and the attachment's active flag.
    engine = engine_api.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK0_NAME)
    disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM0_NAME)
    disk_attachment = disk_attachments_service.attachment_service(
        disk_service.get().id)
    with engine_utils.wait_for_event(engine, 2002):
        # USER_HOTUNPLUG_DISK(2,002)
        correlation_id = 'test_hotunplug_disk'
        assert disk_attachment.update(types.DiskAttachment(active=False),
                                      query={'correlation_id':
                                             correlation_id})

        assertions.assert_true_within_long(
            lambda: test_utils.all_jobs_finished(engine, correlation_id))

        assertions.assert_true_within_short(
            lambda: disk_service.get().status == types.DiskStatus.OK)

        assertions.assert_true_within_short(
            lambda: disk_attachment.get().active == False)
def extend_disk1(api):
    """Resize DISK1 on VM1 to 2 GB and wait until the disk is unlocked and
    reports the new provisioned size.
    """
    engine = api.system_service()
    attachments = test_utils.get_disk_attachments_service(engine, VM1_NAME)
    for attachment in attachments.list():
        # Only DISK1 is resized; ignore every other attachment.
        if api.follow_link(attachment.disk).name != DISK1_NAME:
            continue
        attachment_svc = attachments.attachment_service(attachment.id)
        # USER_EXTEND_DISK_SIZE_SUCCESS(371)
        with test_utils.TestEvent(engine, 371):
            attachment_svc.update(
                types.DiskAttachment(
                    disk=types.Disk(provisioned_size=2 * GB)))

        disk_svc = test_utils.get_disk_service(engine, DISK1_NAME)
        testlib.assert_true_within_short(
            lambda: disk_svc.get().status == types.DiskStatus.OK)
        testlib.assert_true_within_short(
            lambda: disk_svc.get().provisioned_size == 2 * GB)
def cold_storage_migration(api): engine = api.system_service() disk_service = test_utils.get_disk_service(engine, DISK2_NAME) # Cold migrate the disk to ISCSI storage domain and then migrate it back # to the NFS domain because it is used by other cases that assume the # disk found on that specific domain for domain in [SD_ISCSI_NAME, SD_SECOND_NFS_NAME]: with test_utils.TestEvent(engine, 2008): # USER_MOVED_DISK(2,008) disk_service.move( async=False, storage_domain=types.StorageDomain( name=domain ) ) testlib.assert_true_within_long( lambda: api.follow_link( disk_service.get().storage_domains[0]).name == domain ) testlib.assert_true_within_long( lambda: disk_service.get().status == types.DiskStatus.OK )
def sparsify_disk1(api):
    """Sparsify DISK1 and wait for it to become unlocked again."""
    engine = api.system_service()
    disk_svc = test_utils.get_disk_service(engine, DISK1_NAME)
    disk_svc.sparsify()
    # Sparsification is asynchronous; poll until the disk reports OK.
    testlib.assert_true_within_short(
        lambda: disk_svc.get().status == types.DiskStatus.OK)