def backing_image_basic_operation_test(client, volume_name, bi_name, bi_url):  # NOQA
    """
    Test Backing Image APIs.

    1. Create a backing image.
    2. Create and attach a Volume with the backing image set.
    3. Verify that all disk states in the backing image are "ready".
    4. Try to use the API to manually clean up one disk for the backing
       image but get failed.
    5. Try to use the API to directly delete the backing image but get
       failed.
    6. Delete the volume.
    7. Use the API to manually clean up one disk for the backing image.
    8. Delete the backing image.
    """
    # Step 2: create a volume backed by the image and attach it to this node.
    volume = create_and_check_volume(client, volume_name, 3,
                                     str(BACKING_IMAGE_EXT4_SIZE), bi_name)
    lht_host_id = get_self_host_id()
    volume.attach(hostId=lht_host_id)
    volume = wait_for_volume_healthy(client, volume_name)
    assert volume.backingImage == bi_name
    assert volume.size == str(BACKING_IMAGE_EXT4_SIZE)

    # Step 3: all 3 disk entries of the backing image should be "ready".
    # Remember one disk id for the cleanup calls below.
    random_disk_id = ""
    backing_image = client.by_id_backing_image(bi_name)
    assert backing_image.sourceType == BACKING_IMAGE_SOURCE_TYPE_DOWNLOAD
    assert backing_image.parameters["url"] == bi_url
    assert backing_image.currentChecksum != ""
    assert not backing_image.deletionTimestamp
    assert len(backing_image.diskFileStatusMap) == 3
    for disk_id, status in backing_image.diskFileStatusMap.items():
        assert status.state == "ready"
        random_disk_id = disk_id
    assert random_disk_id != ""

    # Steps 4 & 5: while a replica still uses the file, both per-disk cleanup
    # and deletion of the whole backing image must be rejected.
    with pytest.raises(Exception):
        backing_image.backingImageCleanup(disks=[random_disk_id])
    with pytest.raises(Exception):
        client.delete(backing_image)

    # Step 6: remove the volume so the backing image becomes unused.
    client.delete(volume)
    wait_for_volume_delete(client, volume_name)

    # Steps 7 & 8: now the per-disk cleanup and the deletion should succeed.
    backing_image = client.by_id_backing_image(bi_name)
    backing_image.backingImageCleanup(disks=[random_disk_id])
    backing_image = wait_for_backing_image_disk_cleanup(
        client, bi_name, random_disk_id)
    client.delete(backing_image)
def test_setting_backing_image_auto_cleanup(client, core_api, volume_name):  # NOQA
    """
    Test that the Backing Image Cleanup Wait Interval setting works correctly.

    The default value of setting `BackingImageCleanupWaitInterval` is 60.

    1. Create a backing image.
    2. Create multiple volumes using the backing image.
    3. Attach all volumes, Then:
        1. Wait for all volumes to become running.
        2. Verify the correct in all volumes.
        3. Verify the backing image disk status map.
        4. Verify the only backing image file in each disk is reused by
           multiple replicas. The backing image file path is
           `<Data path>/<The backing image name>/backing`
    4. Unschedule test node to guarantee when replica removed from test node,
       no new replica can be rebuilt on the test node.
    5. Remove all replicas in one disk.
       Wait for `BACKING_IMAGE_CLEANUP_WAIT_INTERVAL` seconds. Then verify
       nothing changes in the backing image disk state map (before the
       cleanup wait interval is passed).
    6. Modify `BackingImageCleanupWaitInterval` to a small value. Then verify:
        1. The download state of the disk containing no replica becomes
           terminating first, and the entry will be removed from the map
           later.
        2. The related backing image file is removed.
        3. The download state of other disks keep unchanged. All volumes
           still work fine.
    7. Delete all volumes. Verify that there will only remain 1 entry in the
       backing image disk map.
    8. Delete the backing image.
    """
    # Step 1
    create_backing_image_with_matching_url(
        client, BACKING_IMAGE_NAME, BACKING_IMAGE_QCOW2_URL)

    # Step 2
    volume_names = [volume_name + "-1", volume_name + "-2", volume_name + "-3"]
    # Use a distinct loop variable: the original rebound the `volume_name`
    # fixture parameter inside the loops.
    for name in volume_names:
        create_and_check_volume(client, name, 3,
                                str(BACKING_IMAGE_EXT4_SIZE),
                                BACKING_IMAGE_NAME)

    # Step 3
    lht_host_id = get_self_host_id()
    for name in volume_names:
        volume = client.by_id_volume(name)
        volume.attach(hostId=lht_host_id)
        # Assert against the refreshed object returned by the wait helper,
        # not the stale pre-attach copy.
        volume = wait_for_volume_healthy(client, name)
        assert volume.backingImage == BACKING_IMAGE_NAME

    backing_image = client.by_id_backing_image(BACKING_IMAGE_NAME)
    assert len(backing_image.diskFileStatusMap) == 3
    for disk_id, status in backing_image.diskFileStatusMap.items():
        assert status.state == "ready"

    # All replicas on this node must share one on-disk backing image file.
    backing_images_in_disk = os.listdir("/var/lib/longhorn/backing-images")
    assert len(backing_images_in_disk) == 1
    assert os.path.exists(
        "/var/lib/longhorn/backing-images/{}/backing".format(
            backing_images_in_disk[0]))
    assert os.path.exists(
        "/var/lib/longhorn/backing-images/{}/backing.cfg".format(
            backing_images_in_disk[0]))

    # Step 4
    current_host = client.by_id_node(id=lht_host_id)
    client.update(current_host, allowScheduling=False)
    wait_for_node_update(client, lht_host_id, "allowScheduling", False)

    # Step 5: drop every replica on the test node.
    for name in volume_names:
        volume = client.by_id_volume(name)
        for replica in volume.replicas:
            if replica.hostId == lht_host_id:
                volume.replicaRemove(name=replica.name)

    # This wait interval should be smaller than the setting value.
    # Otherwise, the backing image files may be cleaned up.
    time.sleep(int(BACKING_IMAGE_CLEANUP_WAIT_INTERVAL))
    check_backing_image_disk_map_status(client, BACKING_IMAGE_NAME, 3, "ready")

    # Step 6
    update_setting(client, "backing-image-cleanup-wait-interval", "1")
    check_backing_image_disk_map_status(client, BACKING_IMAGE_NAME, 2, "ready")

    backing_images_in_disk = os.listdir("/var/lib/longhorn/backing-images")
    assert len(backing_images_in_disk) == 0

    # Step 7
    for name in volume_names:
        volume = client.by_id_volume(name)
        client.delete(volume)
        wait_for_volume_delete(client, name)
    check_backing_image_disk_map_status(client, BACKING_IMAGE_NAME, 1, "ready")
def backing_image_content_test(client, volume_name_prefix, bi_name, bi_url):  # NOQA
    """
    Verify the content of the Backing Image is accessible and read-only for
    all volumes.

    1. Create a backing image. (Done by the caller)
    2. Create a Volume with the backing image set then attach it to host node.
    3. Verify that all disk states in the backing image are "downloaded".
    4. Verify volume can be directly mounted and there is already data in
       the filesystem due to the backing image.
    5. Verify the volume r/w.
    6. Launch one more volume with the same backing image.
    7. Verify the data content of the new volume is the same as the data in
       step 4.
    8. Do cleanup. (Done by the caller)
    """
    lht_host_id = get_self_host_id()

    # Steps 2 & 3: first volume backed by the image.
    volume_name1 = volume_name_prefix + "-1"
    volume1 = create_and_check_volume(
        client, volume_name1, 3, str(BACKING_IMAGE_EXT4_SIZE), bi_name)
    volume1.attach(hostId=lht_host_id)
    volume1 = wait_for_volume_healthy(client, volume_name1)
    assert volume1.backingImage == bi_name
    assert volume1.size == str(BACKING_IMAGE_EXT4_SIZE)

    backing_image = client.by_id_backing_image(bi_name)
    assert backing_image.imageURL == bi_url
    assert not backing_image.deletionTimestamp
    assert len(backing_image.diskStateMap) == 3
    for disk_id, state in backing_image.diskStateMap.items():
        assert state == "downloaded"

    # Step 4: since there is already a filesystem with data in the backing
    # image, we can directly mount and access the volume without `mkfs`.
    dev1 = get_volume_endpoint(volume1)
    mount_path1 = os.path.join(DIRECTORY_PATH, volume_name1)
    mount_disk(dev1, mount_path1)
    output1 = subprocess.check_output(["ls", mount_path1])

    # The following random write may crash the filesystem of volume1,
    # need to umount it here
    cleanup_host_disk(volume_name1)

    # Step 5: verify r/w for the volume with a backing image.
    data = write_volume_random_data(volume1)
    check_volume_data(volume1, data)

    # Step 6: second volume with the same backing image.
    volume_name2 = volume_name_prefix + "-2"
    volume2 = create_and_check_volume(
        client, volume_name2, 3, str(BACKING_IMAGE_EXT4_SIZE), bi_name)
    volume2.attach(hostId=lht_host_id)
    volume2 = wait_for_volume_healthy(client, volume_name2)
    # Bug fix: the original re-asserted volume1 here; check volume2 instead.
    assert volume2.backingImage == bi_name
    assert volume2.size == str(BACKING_IMAGE_EXT4_SIZE)

    # Step 7: the new volume exposes the same backing image content.
    dev2 = get_volume_endpoint(volume2)
    mount_path2 = os.path.join(DIRECTORY_PATH, volume_name2)
    mount_disk(dev2, mount_path2)
    output2 = subprocess.check_output(["ls", mount_path2])
    # The output is the content of the backing image, which should keep
    # unchanged
    assert output2 == output1

    cleanup_host_disk(volume_name2)