def test_single_replica_expansion_failed(
        grpc_controller,  # NOQA
        grpc_fixed_dir_replica1, grpc_fixed_dir_replica2):  # NOQA
    """
    The test flow:
    1. Write random data into the block device.
    2. Create the 1st snapshot.
    3. Create an empty directory using the tmp meta file path of the
       expansion disk for replica1.
    4. Try to expand the volume. replica1 will be directly marked as ERR
       state, but the volume expansion should still succeed.
    5. Check the volume status, and verify the expanded volume works fine:
       r/w data, then create the 2nd snapshot.
    6. Rebuild replica1 and check that replica1 is expanded automatically.
    7. Delete replica2, then check if the rebuilt replica1 works fine.
    """
    address = grpc_controller.address
    r1_url = grpc_fixed_dir_replica1.address
    r2_url = grpc_fixed_dir_replica2.address

    dev = get_dev(grpc_fixed_dir_replica1, grpc_fixed_dir_replica2,
                  grpc_controller)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 2
    assert replicas[0].mode == "RW"
    assert replicas[1].mode == "RW"

    # the default size is 4MB, will expand it to 8MB
    zero_char = b'\x00'.decode('utf-8')

    # write the data to the original part then do expansion
    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)

    disk_meta_tmp_1 = os.path.join(FIXED_REPLICA_PATH1,
                                   EXPANSION_DISK_TMP_META_NAME)
    os.mkdir(disk_meta_tmp_1)

    # replica1 will fail to expand the size,
    # then the engine will directly mark it as ERR state.
    # Finally, the volume expansion should succeed since replica2 works fine.
    grpc_controller.volume_frontend_shutdown()
    grpc_controller.volume_expand(EXPANDED_SIZE)
    wait_for_volume_expansion(grpc_controller, EXPANDED_SIZE)
    grpc_controller.volume_frontend_start(FRONTEND_TGT_BLOCKDEV)

    volume_info = grpc_controller.volume_get()
    assert volume_info.last_expansion_error != ""
    assert volume_info.last_expansion_failed_at != ""
    verify_replica_state(grpc_controller, r1_url, "ERR")
    verify_replica_state(grpc_controller, r2_url, "RW")

    expansion_disk_2 = os.path.join(FIXED_REPLICA_PATH2,
                                    EXPANSION_DISK_NAME)
    disk_meta_tmp_2 = os.path.join(FIXED_REPLICA_PATH2,
                                   EXPANSION_DISK_TMP_META_NAME)
    assert os.path.exists(expansion_disk_2)
    assert not os.path.exists(disk_meta_tmp_2)

    # The meta info file should keep unchanged
    replica_meta_file_2 = os.path.join(FIXED_REPLICA_PATH2,
                                       REPLICA_META_FILE_NAME)
    with open(replica_meta_file_2) as f:
        replica_meta_2 = json.load(f)
    assert replica_meta_2["Size"] == EXPANDED_SIZE

    # Cleanup replica1 then check if replica2 works fine
    cleanup_replica(grpc_fixed_dir_replica1)
    verify_replica_state(grpc_controller, r1_url, "ERR")
    grpc_controller.replica_delete(replicas[0].address)

    snap1.verify_data()
    data2_len = random_length(PAGE_SIZE)
    data2 = Data(SIZE - PAGE_SIZE,
                 data2_len, random_string(data2_len))
    snap2 = Snapshot(dev, data2, address)
    snap2.verify_data()
    assert dev.readat(SIZE, SIZE) == zero_char * SIZE

    # Rebuild replica1.
    # The newly opened replica1 will be expanded automatically
    open_replica(grpc_fixed_dir_replica1)
    cmd.add_replica(address, grpc_fixed_dir_replica1.url)
    wait_for_rebuild_complete(address)
    r1 = grpc_fixed_dir_replica1.replica_get()
    assert r1.size == EXPANDED_SIZE_STR
    verify_replica_state(grpc_controller, r1_url, "RW")
    replica_meta_file_1 = os.path.join(FIXED_REPLICA_PATH1,
                                       REPLICA_META_FILE_NAME)
    with open(replica_meta_file_1) as f:
        replica_meta_1 = json.load(f)
    assert replica_meta_1["Size"] == EXPANDED_SIZE

    # Delete replica2 then check if the rebuilt replica1 works fine
    cleanup_replica(grpc_fixed_dir_replica2)
    verify_replica_state(grpc_controller, r2_url, "ERR")
    grpc_controller.replica_delete(replicas[1].address)

    data3_len = random_length(PAGE_SIZE)
    data3 = Data(random.randrange(SIZE, EXPANDED_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data3_len, random_string(data3_len))
    snap3 = Snapshot(dev, data3, address)

    snap1.verify_data()
    snap2.verify_data()
    snap3.verify_data()
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPANDED_SIZE-data3.offset-data3.length)
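

# verify_replica_state, wait_for_volume_expansion, and friends come from
# the shared test utilities and are not shown in this section. A minimal
# sketch of what verify_replica_state presumably does, based on the
# replica_list() fields used above (the name, retry count, and interval
# here are assumptions, not the real helper):
def _verify_replica_state_sketch(grpc_controller, addr, mode,
                                 retries=30, interval=1):
    import time

    for _ in range(retries):
        for r in grpc_controller.replica_list():
            if r.address == addr and r.mode == mode:
                return
        time.sleep(interval)
    raise AssertionError("replica %s never reached mode %s" % (addr, mode))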
def test_expansion_with_rebuild(grpc_controller,  # NOQA
                                grpc_replica1, grpc_replica2):  # NOQA
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 2
    assert replicas[0].mode == "RW"
    assert replicas[1].mode == "RW"

    # the default size is 4MB, will expand it to 8MB
    zero_char = b'\x00'.decode('utf-8')
    original_data = zero_char * SIZE

    # write the data to the original part then do expansion
    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)

    expand_volume_with_frontend(grpc_controller, EXPANDED_SIZE)
    wait_and_check_volume_expansion(grpc_controller, EXPANDED_SIZE)

    snap1.verify_data()
    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:]
    assert dev.readat(SIZE, SIZE) == zero_char * SIZE

    # write the data to both the original part and the expanded part
    data2_len = random_length(PAGE_SIZE)
    data2 = Data(SIZE - PAGE_SIZE,
                 data2_len, random_string(data2_len))
    snap2 = Snapshot(dev, data2, address)
    data3_len = random_length(PAGE_SIZE)
    data3 = Data(random.randrange(SIZE, EXPANDED_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data3_len, random_string(data3_len))
    snap3 = Snapshot(dev, data3, address)

    snap1.verify_data()
    snap2.verify_data()
    snap3.verify_data()
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPANDED_SIZE-data3.offset-data3.length)

    # Cleanup replica2
    cleanup_replica(grpc_replica2)
    verify_replica_state(grpc_controller, grpc_replica2.address, "ERR")
    grpc_controller.replica_delete(replicas[1].address)

    # Rebuild replica2.
    open_replica(grpc_replica2)
    # The newly opened replica2 will be expanded automatically
    cmd.add_replica(address, grpc_replica2.url)
    wait_for_rebuild_complete(address)
    verify_replica_state(grpc_controller, grpc_replica2.address, "RW")

    # Cleanup replica1 then check if the rebuilt replica2 works fine
    cleanup_replica(grpc_replica1)
    verify_replica_state(grpc_controller, grpc_replica1.address, "ERR")
    grpc_controller.replica_delete(replicas[0].address)

    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:data2.offset] + \
        data2.content + \
        original_data[data2.offset+data2.length:]
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPANDED_SIZE-data3.offset-data3.length)

    data4_len = random_length(PAGE_SIZE)
    data4 = Data(data1.offset, data4_len, random_string(data4_len))
    snap4 = Snapshot(dev, data4, address)
    snap4.verify_data()
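

# expand_volume_with_frontend and wait_and_check_volume_expansion are
# shared helpers. Judging from the manual sequence spelled out in
# test_single_replica_expansion_failed above, the expansion helper
# presumably wraps the shutdown-expand-restart dance; a sketch under that
# assumption (hypothetical name, not the real implementation):
def _expand_volume_with_frontend_sketch(grpc_controller, size):
    # the frontend must be down while the engine grows the volume
    grpc_controller.volume_frontend_shutdown()
    grpc_controller.volume_expand(size)
    wait_for_volume_expansion(grpc_controller, size)
    # bring the block device frontend back once the new size is in place
    grpc_controller.volume_frontend_start(FRONTEND_TGT_BLOCKDEV)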
def test_expansion_rollback_with_rebuild(
        grpc_controller,  # NOQA
        grpc_fixed_dir_replica1, grpc_fixed_dir_replica2):  # NOQA
    """
    The test flow:
    1. Write random data into the block device.
    2. Create the 1st snapshot.
    3. Create an empty directory using the tmp meta file path of the
       expansion disk for each replica. This will fail the following
       expansion and trigger expansion rollback.
    4. Try to expand the volume; the expansion fails and the automatic
       rollback is applied implicitly.
    5. Check the volume status and verify there are no leftovers of the
       failed expansion.
    6. Check if the volume is still usable by r/w data,
       then create the 2nd snapshot.
    7. Retry expansion. It should succeed.
    8. Verify the data and try data r/w.
    9. Delete then rebuild replica2.
       The rebuilt replica2 will be expanded automatically.
    10. Delete replica1 then check if the rebuilt replica2 works fine.
    """
    address = grpc_controller.address
    r1_url = grpc_fixed_dir_replica1.address
    r2_url = grpc_fixed_dir_replica2.address

    dev = get_dev(grpc_fixed_dir_replica1, grpc_fixed_dir_replica2,
                  grpc_controller)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 2
    assert replicas[0].mode == "RW"
    assert replicas[1].mode == "RW"

    # the default size is 4MB, will expand it to 8MB
    zero_char = b'\x00'.decode('utf-8')
    original_data = zero_char * SIZE

    # write the data to the original part then do expansion
    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)

    # Use the tmp meta file paths of the expansion disks to create empty
    # directories so that the expansion disk meta data update will fail.
    # Then expansion will fail and the rollback will be triggered.
    disk_meta_tmp_1 = os.path.join(FIXED_REPLICA_PATH1,
                                   EXPANSION_DISK_TMP_META_NAME)
    disk_meta_tmp_2 = os.path.join(FIXED_REPLICA_PATH2,
                                   EXPANSION_DISK_TMP_META_NAME)
    os.mkdir(disk_meta_tmp_1)
    os.mkdir(disk_meta_tmp_2)

    # All replicas' expansion will fail,
    # then the engine will do the rollback automatically
    grpc_controller.volume_frontend_shutdown()
    grpc_controller.volume_expand(EXPANDED_SIZE)
    wait_for_volume_expansion(grpc_controller, SIZE)
    grpc_controller.volume_frontend_start(FRONTEND_TGT_BLOCKDEV)

    # Expansion should fail but the expansion rollback should succeed
    volume_info = grpc_controller.volume_get()
    assert volume_info.last_expansion_error != ""
    assert volume_info.last_expansion_failed_at != ""
    verify_replica_state(grpc_controller, r1_url, "RW")
    verify_replica_state(grpc_controller, r2_url, "RW")

    # The invalid disk and head will be cleaned up automatically
    # after the rollback
    expansion_disk_1 = os.path.join(FIXED_REPLICA_PATH1,
                                    EXPANSION_DISK_NAME)
    expansion_disk_2 = os.path.join(FIXED_REPLICA_PATH2,
                                    EXPANSION_DISK_NAME)
    assert not os.path.exists(expansion_disk_1)
    assert not os.path.exists(expansion_disk_2)
    assert not os.path.exists(disk_meta_tmp_1)
    assert not os.path.exists(disk_meta_tmp_2)
    # The meta info file should keep unchanged
    replica_meta_file_1 = os.path.join(FIXED_REPLICA_PATH1,
                                       REPLICA_META_FILE_NAME)
    replica_meta_file_2 = os.path.join(FIXED_REPLICA_PATH2,
                                       REPLICA_META_FILE_NAME)
    with open(replica_meta_file_1) as f:
        replica_meta_1 = json.load(f)
    assert replica_meta_1["Size"] == SIZE
    with open(replica_meta_file_2) as f:
        replica_meta_2 = json.load(f)
    assert replica_meta_2["Size"] == SIZE

    # try to check then write new data
    snap1.verify_data()
    data2_len = random_length(PAGE_SIZE)
    data2 = Data(SIZE - PAGE_SIZE,
                 data2_len, random_string(data2_len))
    snap2 = Snapshot(dev, data2, address)

    # Retry expansion
    expand_volume_with_frontend(grpc_controller, EXPANDED_SIZE)
    wait_and_check_volume_expansion(grpc_controller, EXPANDED_SIZE)

    with open(replica_meta_file_1) as f:
        replica_meta_1 = json.load(f)
    assert replica_meta_1["Size"] == EXPANDED_SIZE
    with open(replica_meta_file_2) as f:
        replica_meta_2 = json.load(f)
    assert replica_meta_2["Size"] == EXPANDED_SIZE

    assert os.path.exists(expansion_disk_1)
    assert os.path.exists(expansion_disk_2)

    snap1.verify_data()
    snap2.verify_data()
    assert dev.readat(SIZE, SIZE) == zero_char * SIZE

    data3_len = random_length(PAGE_SIZE)
    data3 = Data(random.randrange(SIZE, EXPANDED_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data3_len, random_string(data3_len))
    snap3 = Snapshot(dev, data3, address)

    snap1.verify_data()
    snap2.verify_data()
    snap3.verify_data()
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPANDED_SIZE-data3.offset-data3.length)

    # Delete replica2
    cleanup_replica(grpc_fixed_dir_replica2)
    verify_replica_state(grpc_controller, r2_url, "ERR")
    grpc_controller.replica_delete(replicas[1].address)

    # Rebuild replica2.
    open_replica(grpc_fixed_dir_replica2)
    # The newly opened replica2 will be expanded automatically
    cmd.add_replica(address, grpc_fixed_dir_replica2.url)
    wait_for_rebuild_complete(address)
    verify_replica_state(grpc_controller, r2_url, "RW")

    # Cleanup replica1 then check if the rebuilt replica2 works fine
    cleanup_replica(grpc_fixed_dir_replica1)
    verify_replica_state(grpc_controller, r1_url, "ERR")
    grpc_controller.replica_delete(replicas[0].address)

    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:data2.offset] + \
        data2.content + \
        original_data[data2.offset+data2.length:]
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPANDED_SIZE-data3.offset-data3.length)

    data4_len = random_length(PAGE_SIZE)
    data4 = Data(data1.offset, data4_len, random_string(data4_len))
    snap4 = Snapshot(dev, data4, address)
    snap4.verify_data()
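

# Why pre-creating a directory at the tmp meta path fails the expansion:
# the replica presumably writes the new disk metadata to a tmp file and
# renames it into place (an assumption about the replica internals), and
# renaming a file onto a path occupied by a directory raises OSError.
# A self-contained demonstration of the underlying OS behavior:
def _rename_onto_directory_fails_demo():
    import tempfile

    workdir = tempfile.mkdtemp()
    tmp_meta = os.path.join(workdir, "expansion.meta.tmp")
    with open(tmp_meta, "w") as f:
        f.write("{}")
    target = os.path.join(workdir, "expansion.meta")
    os.mkdir(target)  # the same trick the two tests above play
    try:
        os.rename(tmp_meta, target)
    except OSError:  # IsADirectoryError on Linux
        return True
    return False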
def test_inc_restore_with_rebuild_and_expansion(
        grpc_controller,  # NOQA
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_controller_no_frontend,  # NOQA
        grpc_fixed_dir_replica1, grpc_fixed_dir_replica2,  # NOQA
        backup_targets):  # NOQA
    # Pick up a random backup target.
    backup_target = backup_targets[random.randint(0, 1)]

    address = grpc_controller.address
    dr_address = grpc_controller_no_frontend.address

    try:
        cmd.backup_volume_rm(address, VOLUME_NAME, backup_target)
    except Exception:
        pass

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    start_no_frontend_volume(grpc_controller_no_frontend,
                             grpc_fixed_dir_replica1)

    data0_len = random_length(PAGE_SIZE)
    data0 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data0_len, random_string(data0_len))
    snap0 = Snapshot(dev, data0, address)

    backup0_info = create_backup(address, snap0.name, backup_target)
    assert backup0_info["VolumeName"] == VOLUME_NAME
    assert backup0_info["Size"] == str(BLOCK_SIZE)

    cmd.backup_restore(dr_address, backup0_info["URL"])
    wait_for_restore_completion(dr_address, backup0_info["URL"])
    verify_no_frontend_data(data0.offset, data0.content,
                            grpc_controller_no_frontend)

    expand_volume_with_frontend(grpc_controller, EXPANDED_SIZE)
    wait_and_check_volume_expansion(grpc_controller, EXPANDED_SIZE)

    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(SIZE, EXPANDED_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)

    backup1_info = create_backup(address, snap1.name, backup_target,
                                 EXPANDED_SIZE_STR)
    assert backup1_info["VolumeName"] == VOLUME_NAME
    assert backup1_info["Size"] == str(2 * BLOCK_SIZE)

    backup_volumes = cmd.backup_volume_list(address, VOLUME_NAME,
                                            backup_target)
    assert VOLUME_NAME in backup_volumes
    url = get_backup_volume_url(backup_target, VOLUME_NAME)
    backup_info = cmd.backup_inspect_volume(address, url)
    assert backup_info["Size"] == EXPANDED_SIZE_STR

    # The restore command invocation should error out
    with pytest.raises(subprocess.CalledProcessError) as e:
        cmd.backup_restore(dr_address, backup1_info["URL"])
    assert "need to expand the DR volume" in e.value.stdout

    # The above restore error is triggered before calling the replicas.
    # Hence the error won't be recorded in the restore status
    # and we can continue restoring backups for the DR volume.
    rs = cmd.restore_status(dr_address)
    for status in rs.values():
        assert status['backupURL'] == backup0_info["URL"]
        assert status['lastRestored'] == backup0_info["Name"]
        assert 'error' not in status.keys()
        assert not status["isRestoring"]

    grpc_controller_no_frontend.volume_expand(EXPANDED_SIZE)
    wait_for_volume_expansion(grpc_controller_no_frontend, EXPANDED_SIZE)

    # This restore command will trigger snapshot purge.
    # And the error is triggered before calling the replicas.
    with pytest.raises(subprocess.CalledProcessError) as e:
        cmd.backup_restore(dr_address, backup1_info["URL"])
    assert "found more than 1 snapshot in the replicas, " \
           "hence started to purge snapshots before the restore" \
           in e.value.stdout
    wait_for_purge_completion(dr_address)

    snaps_info = cmd.snapshot_info(dr_address)
    assert len(snaps_info) == 2
    volume_head_name = "volume-head"
    snap_name = "expand-" + EXPANDED_SIZE_STR
    head_info = snaps_info[volume_head_name]
    assert head_info["name"] == volume_head_name
    assert head_info["parent"] == snap_name
    assert not head_info["children"]
    assert head_info["usercreated"] is False
    snap_info = snaps_info[snap_name]
    assert snap_info["name"] == snap_name
    assert not snap_info["parent"]
    assert volume_head_name in snap_info["children"]
    assert snap_info["usercreated"] is False

    cmd.backup_restore(dr_address, backup1_info["URL"])
    wait_for_restore_completion(dr_address, backup1_info["URL"])
    verify_no_frontend_data(data1.offset, data1.content,
                            grpc_controller_no_frontend)

    # For a DR volume, the rebuilding replica won't be expanded
    # automatically.
    open_replica(grpc_fixed_dir_replica2)
    with pytest.raises(subprocess.CalledProcessError):
        cmd.add_replica(dr_address, grpc_fixed_dir_replica2.url, True)

    # Manually expand the rebuilding replica then retry `add-replica`.
    grpc_fixed_dir_replica2.replica_open()
    grpc_fixed_dir_replica2.replica_expand(EXPANDED_SIZE)
    grpc_fixed_dir_replica2.replica_close()
    cmd.add_replica(dr_address, grpc_fixed_dir_replica2.url, True)

    replicas = grpc_controller_no_frontend.replica_list()
    assert len(replicas) == 2
    rw_replica, wo_replica = 0, 0
    for r in replicas:
        if r.mode == 'RW':
            rw_replica += 1
        else:
            assert r.mode == "WO"
            wo_replica += 1
    assert rw_replica == 1 and wo_replica == 1

    # The old replica will fail the restore but the error won't be recorded.
    # Then the rebuilding replica will start a full restore.
    with pytest.raises(subprocess.CalledProcessError) as e:
        cmd.backup_restore(dr_address, backup1_info["URL"])
    assert "already restored backup" in e.value.stdout
    wait_for_restore_completion(dr_address, backup1_info["URL"])

    cmd.verify_rebuild_replica(dr_address, grpc_fixed_dir_replica2.url)
    replicas = grpc_controller_no_frontend.replica_list()
    assert len(replicas) == 2
    for r in replicas:
        assert r.mode == 'RW'

    verify_no_frontend_data(data1.offset, data1.content,
                            grpc_controller_no_frontend)

    cmd.backup_volume_rm(grpc_controller.address,
                         VOLUME_NAME, backup_target)
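

# wait_for_restore_completion is a shared helper. Based on the
# cmd.restore_status() payload asserted on above (backupURL, lastRestored,
# isRestoring, optional error), it presumably polls until every replica
# reports the target backup with no in-flight restore; a sketch under that
# assumption (hypothetical name, retry count, and interval):
def _wait_for_restore_completion_sketch(dr_address, backup_url,
                                        retries=60, interval=1):
    import time

    for _ in range(retries):
        rs = cmd.restore_status(dr_address)
        if all(s.get('backupURL') == backup_url and
               not s.get('isRestoring') and
               'error' not in s
               for s in rs.values()):
            return
        time.sleep(interval)
    raise AssertionError("restore of %s did not complete" % backup_url)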
def test_restore_with_rebuild(
        grpc_controller,  # NOQA
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_controller_no_frontend,  # NOQA
        grpc_fixed_dir_replica1, grpc_fixed_dir_replica2,  # NOQA
        backup_targets):  # NOQA
    # Pick up a random backup target.
    backup_target = backup_targets[random.randint(0, 1)]

    address = grpc_controller.address
    dr_address = grpc_controller_no_frontend.address

    try:
        cmd.backup_volume_rm(address, VOLUME_NAME, backup_target)
    except Exception:
        pass

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    start_no_frontend_volume(grpc_controller_no_frontend,
                             grpc_fixed_dir_replica1)

    data0_len = random_length(PAGE_SIZE)
    data0 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data0_len, random_string(data0_len))
    snap0 = Snapshot(dev, data0, address)

    backup0_info = create_backup(address, snap0.name, backup_target)
    assert backup0_info["VolumeName"] == VOLUME_NAME
    assert backup0_info["Size"] == str(BLOCK_SIZE)

    cmd.backup_restore(dr_address, backup0_info["URL"])
    wait_for_restore_completion(dr_address, backup0_info["URL"])
    verify_no_frontend_data(data0.offset, data0.content,
                            grpc_controller_no_frontend)

    open_replica(grpc_fixed_dir_replica2)
    cmd.add_replica(dr_address, grpc_fixed_dir_replica2.url, True)

    replicas = grpc_controller_no_frontend.replica_list()
    assert len(replicas) == 2
    rw_replica, wo_replica = 0, 0
    for r in replicas:
        if r.mode == 'RW':
            rw_replica += 1
        else:
            assert r.mode == "WO"
            wo_replica += 1
    assert rw_replica == 1 and wo_replica == 1

    # The old replica will fail the restore but the error won't be recorded.
    # Then the rebuilding replica will start a full restore.
    with pytest.raises(subprocess.CalledProcessError) as e:
        cmd.backup_restore(dr_address, backup0_info["URL"])
    assert "already restored backup" in e.value.stdout
    wait_for_restore_completion(dr_address, backup0_info["URL"])

    # Need to manually verify the rebuilding replica for the restore volume
    cmd.verify_rebuild_replica(dr_address, grpc_fixed_dir_replica2.url)
    replicas = grpc_controller_no_frontend.replica_list()
    assert len(replicas) == 2
    for r in replicas:
        assert r.mode == 'RW'

    # Delete the old replica then check if the rebuilt replica works fine.
    cleanup_replica(grpc_fixed_dir_replica1)
    grpc_controller_no_frontend.replica_delete(
        grpc_fixed_dir_replica1.address)

    verify_no_frontend_data(data0.offset, data0.content,
                            grpc_controller_no_frontend)

    cmd.backup_volume_rm(grpc_controller.address,
                         VOLUME_NAME, backup_target)
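

# Note on the WO/RW counting in the two restore tests above: for a restore
# (DR) volume the engine presumably cannot run the usual online rebuild,
# so a replica added with the restore flag stays write-only (WO) until
# cmd.verify_rebuild_replica() promotes it. That is why both tests expect
# exactly one RW and one WO replica before the verification call and two
# RW replicas afterwards.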
def test_snapshot_rm_basic(grpc_controller,  # NOQA
                           grpc_replica1, grpc_replica2):  # NOQA
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    existings = {}
    snap1 = Snapshot(dev, generate_random_data(existings), address)
    snap2 = Snapshot(dev, generate_random_data(existings), address)
    snap3 = Snapshot(dev, generate_random_data(existings), address)

    info = cmd.snapshot_info(address)
    assert len(info) == 4
    assert VOLUME_HEAD in info
    assert snap1.name in info
    assert snap2.name in info
    assert snap3.name in info

    cmd.snapshot_rm(address, snap2.name)
    cmd.snapshot_purge(address)
    wait_for_purge_completion(address)

    info = cmd.snapshot_info(address)
    assert len(info) == 3
    assert snap1.name in info
    assert snap3.name in info

    snap3.verify_checksum()
    snap2.verify_data()
    snap1.verify_data()

    snapshot_revert_with_frontend(address, ENGINE_NAME, snap1.name)
    snap3.refute_data()
    snap2.refute_data()
    snap1.verify_checksum()
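

# Snapshot chain for the test above, derived from its assertions:
#
#   before rm/purge:       snap1 <- snap2 <- snap3 <- volume-head
#   after rm+purge snap2:  snap1 <- snap3 <- volume-head
#
# snap2.verify_data() still passes after the purge because deleting a
# snapshot coalesces its data into the newer neighbor (here snap3) rather
# than discarding it; only the snapshot entry disappears.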
def volume_expansion_with_snapshots_test(dev, grpc_controller,  # NOQA
                                         volume_name, engine_name,
                                         original_data):
    # the default size is 4MB, will expand it to 8MB
    address = grpc_controller.address
    zero_char = b'\x00'.decode('utf-8')

    # write the data to the original part then do expansion
    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(0, SIZE-2*PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)

    expand_volume_with_frontend(grpc_controller, EXPANDED_SIZE)
    wait_and_check_volume_expansion(grpc_controller, EXPANDED_SIZE)

    snap1.verify_data()
    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:]
    assert dev.readat(SIZE, SIZE) == zero_char*SIZE

    # write the data to both the original part and the expanded part
    data2_len = random_length(PAGE_SIZE)
    data2 = Data(SIZE-PAGE_SIZE, data2_len, random_string(data2_len))
    snap2 = Snapshot(dev, data2, address)
    data3_len = random_length(PAGE_SIZE)
    data3 = Data(random.randrange(SIZE, EXPANDED_SIZE-PAGE_SIZE, PAGE_SIZE),
                 data3_len, random_string(data3_len))
    snap3 = Snapshot(dev, data3, address)

    snap1.verify_data()
    snap2.verify_data()
    snap3.verify_data()
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPANDED_SIZE-data3.offset-data3.length)

    data4_len = random_length(PAGE_SIZE)
    data4 = Data(data1.offset, data4_len, random_string(data4_len))
    snap4 = Snapshot(dev, data4, address)
    snap4.verify_data()

    # revert to snap1 then see if we can still r/w the existing data
    # and the expanded part
    snapshot_revert_with_frontend(address, engine_name, snap1.name)
    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:]
    assert dev.readat(SIZE, SIZE) == zero_char*SIZE

    data5_len = random_length(PAGE_SIZE)
    data5 = Data(random.randrange(SIZE, EXPANDED_SIZE-PAGE_SIZE, PAGE_SIZE),
                 data5_len, random_string(data5_len))
    snap5 = Snapshot(dev, data5, address)
    snap5.verify_data()
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data5.offset-SIZE) + \
        data5.content + zero_char*(EXPANDED_SIZE-data5.offset-data5.length)

    # delete and purge the snap1. it will coalesce with the larger snap2
    cmd.snapshot_rm(address, snap1.name)
    cmd.snapshot_purge(address)
    wait_for_purge_completion(address)
    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:]
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data5.offset-SIZE) + \
        data5.content + zero_char*(EXPANDED_SIZE-data5.offset-data5.length)
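

# The expanded-part assertion above recurs throughout this file. A
# hypothetical helper (not part of the original suite) that makes the
# expected layout explicit: the expanded half of the device is all zeroes
# except for the freshly written chunk.
def _expected_expanded_part(data, zero_char):
    return (zero_char * (data.offset - SIZE) +
            data.content +
            zero_char * (EXPANDED_SIZE - data.offset - data.length))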
def snapshot_revert_test(dev, address, engine_name):  # NOQA
    existings = {}

    snap1 = Snapshot(dev, generate_random_data(existings), address)
    snap2 = Snapshot(dev, generate_random_data(existings), address)
    snap3 = Snapshot(dev, generate_random_data(existings), address)

    snapList = cmd.snapshot_ls(address)
    assert snap1.name in snapList
    assert snap2.name in snapList
    assert snap3.name in snapList

    snapshot_revert_with_frontend(address, engine_name, snap2.name)
    snap3.refute_data()
    snap2.verify_checksum()
    snap1.verify_data()

    snapshot_revert_with_frontend(address, engine_name, snap1.name)
    snap3.refute_data()
    snap2.refute_data()
    snap1.verify_checksum()
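

# Assumed semantics of the Snapshot verification helpers used throughout
# these tests, inferred from how they are called: verify_data() re-reads
# the device at the snapshot's offset and compares it with the written
# content; verify_checksum() checks a device-wide checksum captured when
# the snapshot was taken; refute_data() asserts the written content is no
# longer visible, e.g. after reverting to an earlier snapshot.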
def test_snapshot_rm_rolling(grpc_controller,  # NOQA
                             grpc_replica1, grpc_replica2):  # NOQA
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    existings = {}
    snap1 = Snapshot(dev, generate_random_data(existings), address)

    snapList = cmd.snapshot_ls(address)
    assert snap1.name in snapList

    cmd.snapshot_rm(address, snap1.name)
    # cannot do anything because it's the parent of volume head
    cmd.snapshot_purge(address)
    wait_for_purge_completion(address)

    snap2 = Snapshot(dev, generate_random_data(existings), address)

    info = cmd.snapshot_info(address)
    assert len(info) == 3
    assert snap1.name in info
    assert snap2.name in info
    assert info[snap1.name]["removed"] is True
    assert info[snap2.name]["removed"] is False

    cmd.snapshot_rm(address, snap2.name)
    # this should trigger the deletion of snap1
    cmd.snapshot_purge(address)
    wait_for_purge_completion(address)

    snap2.verify_checksum()
    snap1.verify_data()

    snap3 = Snapshot(dev, generate_random_data(existings), address)
    snap4 = Snapshot(dev, generate_random_data(existings), address)
    snap5 = Snapshot(dev, generate_random_data(existings), address)

    snapList = cmd.snapshot_ls(address)
    assert snap1.name not in snapList
    assert snap2.name not in snapList
    assert snap3.name in snapList
    assert snap4.name in snapList
    assert snap5.name in snapList

    info = cmd.snapshot_info(address)
    assert len(info) == 5
    assert snap1.name not in info
    assert snap2.name in info
    assert snap3.name in info
    assert snap4.name in info
    assert snap5.name in info
    assert info[snap2.name]["removed"] is True

    cmd.snapshot_rm(address, snap3.name)
    cmd.snapshot_rm(address, snap4.name)
    cmd.snapshot_rm(address, snap5.name)
    # this should trigger the deletion of snap2 - snap4
    # and snap5 marked as removed
    cmd.snapshot_purge(address)
    wait_for_purge_completion(address)

    info = cmd.snapshot_info(address)
    assert len(info) == 2
    assert snap1.name not in info
    assert snap2.name not in info
    assert snap3.name not in info
    assert snap4.name not in info
    assert snap5.name in info
    assert info[snap5.name]["removed"] is True

    snap5.verify_checksum()
    snap4.verify_data()
    snap3.verify_data()
    snap2.verify_data()
    snap1.verify_data()
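

# Chain evolution in the rolling test above, derived from its assertions:
# a snapshot that is the direct parent of volume-head (snap1 at first,
# snap5 at the end) can only be marked as removed; it is physically
# deleted by a later purge once a newer snapshot sits between it and the
# head. That is why snap5 survives the final purge with "removed" set to
# True while snap2 - snap4 disappear entirely.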
def volume_expansion_with_backup_test(
        grpc_engine_manager,  # NOQA
        grpc_controller,  # NOQA
        grpc_dr_controller,  # NOQA
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_dr_replica1,  # NOQA
        grpc_dr_replica2,  # NOQA
        volume_name, engine_name, backup_target):  # NOQA
    address = grpc_controller.address
    dr_address = grpc_dr_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    start_no_frontend_volume(grpc_engine_manager,
                             grpc_dr_controller,
                             grpc_dr_replica1, grpc_dr_replica2)

    try:
        cmd.backup_volume_rm(address, volume_name, backup_target)
    except Exception:
        pass

    data0_len = random_length(PAGE_SIZE)
    data0 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data0_len, random_string(data0_len))
    snap0 = Snapshot(dev, data0, address)

    backup0_info = create_backup(address, snap0.name, backup_target)
    assert backup0_info["VolumeName"] == volume_name
    assert backup0_info["Size"] == str(BLOCK_SIZE)

    cmd.backup_restore(dr_address, backup0_info["URL"])
    wait_for_restore_completion(dr_address, backup0_info["URL"])
    verify_no_frontend_data(grpc_engine_manager,
                            data0.offset, data0.content,
                            grpc_dr_controller)

    expand_volume_with_frontend(grpc_controller, EXPANDED_SIZE)
    wait_and_check_volume_expansion(grpc_controller, EXPANDED_SIZE)

    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(SIZE, EXPANDED_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)

    backup1_info = create_backup(address, snap1.name, backup_target,
                                 EXPANDED_SIZE_STR)
    assert backup1_info["VolumeName"] == volume_name
    assert backup1_info["Size"] == str(2 * BLOCK_SIZE)

    backup_volumes = cmd.backup_volume_list(address, volume_name,
                                            backup_target)
    assert volume_name in backup_volumes
    assert backup_volumes[volume_name]["Size"] == EXPANDED_SIZE_STR

    # incremental restoration will implicitly expand the volume first
    restore_incrementally(dr_address, backup1_info["URL"],
                          backup0_info["Name"])

    check_dr_volume_block_device_size(grpc_engine_manager,
                                      grpc_dr_controller, EXPANDED_SIZE)

    verify_no_frontend_data(grpc_engine_manager,
                            data1.offset, data1.content,
                            grpc_dr_controller)

    cmd.backup_volume_rm(grpc_controller.address,
                         volume_name, backup_target)
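

# Note on restore_incrementally above: unlike the plain cmd.backup_restore
# in test_inc_restore_with_rebuild_and_expansion, which errors out with
# "need to expand the DR volume" and requires a manual volume_expand, the
# incremental restore path expands the DR volume to the backup's size by
# itself, which is what the check_dr_volume_block_device_size call
# verifies here.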