def test_expand_multiple_times():
    """Stress-test expansion: repeatedly spin up a fresh engine plus a
    single replica, start the volume, expand it, then tear everything down.

    Thirty iterations catch flaky process-lifecycle or expansion races.
    """
    for _ in range(30):
        # Launch an engine process and a controller client bound to it.
        em_client = ProcessManagerClient(INSTANCE_MANAGER_ENGINE)
        engine_process = create_engine_process(em_client)
        grpc_controller_client = ControllerClient(
            get_process_address(engine_process))

        # Launch a replica process and a replica client bound to it.
        rm_client = ProcessManagerClient(INSTANCE_MANAGER_REPLICA)
        replica_process = create_replica_process(rm_client, REPLICA_NAME)
        grpc_replica_client = ReplicaClient(
            get_process_address(replica_process))

        # Give the freshly started processes a moment to come up.
        time.sleep(3)

        cleanup_replica(grpc_replica_client)
        open_replica(grpc_replica_client)

        r1_url = grpc_replica_client.url
        v = grpc_controller_client.volume_start(replicas=[
            r1_url,
        ])
        assert v.replicaCount == 1

        expand_volume_with_frontend(
            grpc_controller_client, EXPANDED_SIZE)
        wait_and_check_volume_expansion(
            grpc_controller_client, EXPANDED_SIZE)

        cleanup_process(em_client)
        cleanup_process(rm_client)
def test_controller_expand(grpc_controller_client):  # NOQA
    """Expand a volume backed by two file-based backends and verify both
    backend files grow to the expanded size."""
    v = grpc_controller_client.volume_get()
    assert v.replicaCount == 0

    # Start the volume on two plain file backends.
    f1 = create_backend_file()
    f2 = create_backend_file()
    addresses = ['file://' + f1, 'file://' + f2]
    v = grpc_controller_client.volume_start(replicas=addresses)
    assert v.replicaCount == 2

    expand_volume_with_frontend(grpc_controller_client, EXPANDED_SIZE)
    wait_and_check_volume_expansion(grpc_controller_client, EXPANDED_SIZE)

    # Both backend files must have been resized identically.
    f1_size = os.path.getsize(f1)
    f2_size = os.path.getsize(f2)
    assert f1_size == f2_size == EXPANDED_SIZE

    v = grpc_controller_client.volume_shutdown()
    assert v.replicaCount == 0
def test_volume_expand_with_snapshots(  # NOQA
        bin, grpc_controller_client,  # NOQA
        grpc_replica_client, grpc_replica_client2):  # NOQA
    """Verify expansion interacts correctly with the snapshot chain:
    the implicit expansion snapshot is created, can be purged (coalesced
    into its child), and a smaller snapshot can be folded into a larger one
    without changing the replica size."""
    open_replica(grpc_replica_client)
    open_replica(grpc_replica_client2)

    r1_url = grpc_replica_client.url
    r2_url = grpc_replica_client2.url
    v = grpc_controller_client.volume_start(replicas=[
        r1_url,
        r2_url,
    ])
    assert v.replicaCount == 2

    # snap0: plain snapshot before expansion.
    cmd = [bin, '--url', grpc_controller_client.address,
           'snapshot', 'create']
    snap0 = subprocess.check_output(cmd, encoding='utf-8').strip()
    expected = grpc_replica_client.replica_get().chain[1]
    assert expected == 'volume-snap-{}.img'.format(snap0)

    # snap1: labeled snapshot before expansion.
    cmd = [bin, '--url', grpc_controller_client.address,
           'snapshot', 'create',
           '--label', 'name=snap1', '--label', 'key=value']
    snap1 = subprocess.check_output(cmd, encoding='utf-8').strip()

    expand_volume_with_frontend(grpc_controller_client, EXPANDED_SIZE)
    wait_and_check_volume_expansion(
        grpc_controller_client, EXPANDED_SIZE)

    # `expand` will create a snapshot then apply the new size
    # on the new head file
    snap_expansion = get_expansion_snapshot_name()
    r1 = grpc_replica_client.replica_get()
    assert r1.chain[1] == 'volume-snap-{}.img'.format(snap_expansion)
    assert r1.size == EXPANDED_SIZE_STR
    r2 = grpc_replica_client2.replica_get()
    assert r2.chain[1] == 'volume-snap-{}.img'.format(snap_expansion)
    assert r2.size == EXPANDED_SIZE_STR

    # On disk: expansion snapshot keeps the old size, new head gets the
    # expanded size.
    replica_paths = get_replica_paths_from_snapshot_name(snap_expansion)
    assert replica_paths
    for p in replica_paths:
        snap_path = get_snapshot_file_paths(
            p, snap_expansion)
        assert snap_path is not None
        assert os.path.exists(snap_path)
        assert os.path.getsize(snap_path) == SIZE
        head_path = get_replica_head_file_path(p)
        assert head_path is not None
        assert os.path.exists(head_path)
        assert os.path.getsize(head_path) == EXPANDED_SIZE

    # snap2: snapshot after expansion; becomes the expansion snapshot's
    # child.
    cmd = [bin, '--url', grpc_controller_client.address,
           'snapshot', 'create', '--label', 'name=snap2']
    snap2 = subprocess.check_output(cmd, encoding='utf-8').strip()

    cmd = [bin, '--debug', '--url',
           grpc_controller_client.address,
           'snapshot', 'ls']
    ls_output = subprocess.check_output(cmd, encoding='utf-8')
    assert ls_output == '''ID
{}
{}
{}
{}
'''.format(snap2, snap_expansion, snap1, snap0)

    cmd = [bin, '--url', grpc_controller_client.address,
           'snapshot', 'info']
    output = subprocess.check_output(cmd)
    info = json.loads(output)
    # cannot check the snapshot size here since the output will return
    # the actual file size
    assert info[snap_expansion]["parent"] == snap1
    assert info[snap_expansion]["removed"] is False
    assert info[snap_expansion]["usercreated"] is False
    assert len(info[snap_expansion]["labels"]) == 1
    assert \
        info[snap_expansion]["labels"]["replica-expansion"] \
        == EXPANDED_SIZE_STR
    assert info[VOLUME_HEAD]["parent"] == snap2
    assert len(info[VOLUME_HEAD]["labels"]) == 0

    # snapshot purge command will coalesce the expansion snapshot
    # with its child snapshot `snap2`
    cmd = [bin, '--url', grpc_controller_client.address,
           'snapshot', 'purge']
    subprocess.check_call(cmd)
    wait_for_purge_completion(grpc_controller_client.address)

    cmd = [bin, '--debug', '--url',
           grpc_controller_client.address,
           'snapshot', 'ls']
    ls_output = subprocess.check_output(cmd, encoding='utf-8')
    assert ls_output == '''ID
{}
{}
{}
'''.format(snap2, snap1, snap0)

    cmd = [bin, '--url', grpc_controller_client.address,
           'snapshot', 'info']
    output = subprocess.check_output(cmd)
    info = json.loads(output)
    assert snap_expansion not in info
    assert info[snap2]["parent"] == snap1
    assert info[snap2]["removed"] is False
    assert info[snap2]["usercreated"] is True

    # snap2 inherited the expanded size from the coalesced expansion
    # snapshot; snap1 still has the original size.
    for p in replica_paths:
        snap1_path = get_snapshot_file_paths(
            p, snap1)
        assert snap1_path is not None
        assert os.path.exists(snap1_path)
        assert os.path.getsize(snap1_path) == SIZE
        snap2_path = get_snapshot_file_paths(
            p, snap2)
        assert snap2_path is not None
        assert os.path.exists(snap2_path)
        assert os.path.getsize(snap2_path) == EXPANDED_SIZE

    # Make sure the smaller snapshot `snap1` can be folded to
    # the larger one `snap2` and the replica size won't change.
    cmd = [bin, '--url', grpc_controller_client.address,
           'snapshot', 'rm', snap1]
    subprocess.check_call(cmd)
    cmd = [bin, '--url', grpc_controller_client.address,
           'snapshot', 'purge']
    subprocess.check_call(cmd)
    wait_for_purge_completion(grpc_controller_client.address)

    cmd = [bin, '--debug', '--url',
           grpc_controller_client.address,
           'snapshot', 'ls']
    ls_output = subprocess.check_output(cmd, encoding='utf-8')
    assert ls_output == '''ID
{}
{}
'''.format(snap2, snap0)

    for p in replica_paths:
        snap0_path = get_snapshot_file_paths(
            p, snap0)
        assert snap0_path is not None
        assert os.path.exists(snap0_path)
        assert os.path.getsize(snap0_path) == SIZE
        snap2_path = get_snapshot_file_paths(
            p, snap2)
        assert snap2_path is not None
        assert os.path.exists(snap2_path)
        assert os.path.getsize(snap2_path) == EXPANDED_SIZE
        head_path = get_replica_head_file_path(p)
        assert head_path is not None
        assert os.path.exists(head_path)
        assert os.path.getsize(head_path) == EXPANDED_SIZE

    r1 = grpc_replica_client.replica_get()
    assert r1.chain[1] == 'volume-snap-{}.img'.format(snap2)
    assert r1.size == EXPANDED_SIZE_STR
    r2 = grpc_replica_client2.replica_get()
    assert r2.chain[1] == 'volume-snap-{}.img'.format(snap2)
    assert r2.size == EXPANDED_SIZE_STR
def test_expansion_rollback_with_rebuild(grpc_controller,
                                         grpc_fixed_dir_replica1,
                                         grpc_fixed_dir_replica2):  # NOQA
    """
    The test flow:
    1. Write random data into the block device.
    2. Create the 1st snapshot.
    3. Create an empty directory using the tmp meta file path of
       the expansion disk for each replica.
       This will fail the following expansion and trigger expansion rollback.
    4. Try to expand the volume but fails. Then the automatic rollback
       will be applied implicitly.
    5. Check the volume status and if there are leftovers of
       the failed expansion.
    6. Check if the volume is still usable by r/w data,
       then create the 2nd snapshot.
    7. Retry expansion. It should succeed.
    8. Verify the data and try data r/w.
    9. Delete then rebuild the replica2.
       Then rebuilt replica2 will be expanded automatically.
    10. Delete the replica1 then check if the rebuilt replica2 works fine.
    """
    address = grpc_controller.address
    r1_url = grpc_fixed_dir_replica1.address
    r2_url = grpc_fixed_dir_replica2.address

    dev = get_dev(grpc_fixed_dir_replica1, grpc_fixed_dir_replica2,
                  grpc_controller)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 2
    assert replicas[0].mode == "RW"
    assert replicas[1].mode == "RW"

    # the default size is 4MB, will expand it to 8MB
    zero_char = b'\x00'.decode('utf-8')
    original_data = zero_char * SIZE

    # write the data to the original part then do expansion
    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)

    # use the tmp meta file path of expansion disks to create empty
    # directories so that the expansion disk meta data update will fail.
    # Then expansion will fail and the rollback will be triggered.
    disk_meta_tmp_1 = os.path.join(FIXED_REPLICA_PATH1,
                                   EXPANSION_DISK_TMP_META_NAME)
    disk_meta_tmp_2 = os.path.join(FIXED_REPLICA_PATH2,
                                   EXPANSION_DISK_TMP_META_NAME)
    os.mkdir(disk_meta_tmp_1)
    os.mkdir(disk_meta_tmp_2)

    # All replicas' expansion will fail
    # then engine will do rollback automatically
    grpc_controller.volume_frontend_shutdown()
    grpc_controller.volume_expand(EXPANDED_SIZE)
    wait_for_volume_expansion(grpc_controller, SIZE)
    grpc_controller.volume_frontend_start(FRONTEND_TGT_BLOCKDEV)

    # Expansion should fail but the expansion rollback should succeed
    volume_info = grpc_controller.volume_get()
    assert volume_info.last_expansion_error != ""
    assert volume_info.last_expansion_failed_at != ""
    verify_replica_state(grpc_controller, r1_url, "RW")
    verify_replica_state(grpc_controller, r2_url, "RW")

    # The invalid disk and head will be cleaned up automatically
    # after the rollback
    expansion_disk_1 = os.path.join(FIXED_REPLICA_PATH1,
                                    EXPANSION_DISK_NAME)
    expansion_disk_2 = os.path.join(FIXED_REPLICA_PATH2,
                                    EXPANSION_DISK_NAME)
    assert not os.path.exists(expansion_disk_1)
    assert not os.path.exists(expansion_disk_2)
    assert not os.path.exists(disk_meta_tmp_1)
    assert not os.path.exists(disk_meta_tmp_2)

    # The meta info file should keep unchanged
    replica_meta_file_1 = os.path.join(FIXED_REPLICA_PATH1,
                                       REPLICA_META_FILE_NAME)
    replica_meta_file_2 = os.path.join(FIXED_REPLICA_PATH2,
                                       REPLICA_META_FILE_NAME)
    with open(replica_meta_file_1) as f:
        replica_meta_1 = json.load(f)
    assert replica_meta_1["Size"] == SIZE
    with open(replica_meta_file_2) as f:
        replica_meta_2 = json.load(f)
    assert replica_meta_2["Size"] == SIZE

    # try to check then write new data
    snap1.verify_data()
    data2_len = random_length(PAGE_SIZE)
    data2 = Data(SIZE - PAGE_SIZE, data2_len, random_string(data2_len))
    snap2 = Snapshot(dev, data2, address)

    # Retry expansion
    expand_volume_with_frontend(grpc_controller, EXPANDED_SIZE)
    wait_and_check_volume_expansion(grpc_controller, EXPANDED_SIZE)

    with open(replica_meta_file_1) as f:
        replica_meta_1 = json.load(f)
    assert replica_meta_1["Size"] == EXPANDED_SIZE
    with open(replica_meta_file_2) as f:
        replica_meta_2 = json.load(f)
    assert replica_meta_2["Size"] == EXPANDED_SIZE

    assert os.path.exists(expansion_disk_1)
    assert os.path.exists(expansion_disk_2)

    snap1.verify_data()
    snap2.verify_data()
    assert dev.readat(SIZE, SIZE) == zero_char * SIZE

    # write into the expanded region, then verify everything still reads
    # back correctly
    data3_len = random_length(PAGE_SIZE)
    data3 = Data(random.randrange(SIZE, EXPANDED_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data3_len, random_string(data3_len))
    snap3 = Snapshot(dev, data3, address)

    snap1.verify_data()
    snap2.verify_data()
    snap3.verify_data()
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPANDED_SIZE-data3.offset-data3.length)

    # Delete replica2
    cleanup_replica(grpc_fixed_dir_replica2)
    verify_replica_state(grpc_controller, r2_url, "ERR")
    grpc_controller.replica_delete(replicas[1].address)

    # Rebuild replica2.
    open_replica(grpc_fixed_dir_replica2)
    # The newly opened replica2 will be expanded automatically
    cmd.add_replica(address, grpc_fixed_dir_replica2.url)
    wait_for_rebuild_complete(address)
    verify_replica_state(grpc_controller, r2_url, "RW")

    # Cleanup replica1 then check if the rebuilt replica2 works fine
    cleanup_replica(grpc_fixed_dir_replica1)
    verify_replica_state(grpc_controller, r1_url, "ERR")
    grpc_controller.replica_delete(replicas[0].address)

    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:data2.offset] + \
        data2.content + \
        original_data[data2.offset+data2.length:]
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPANDED_SIZE-data3.offset-data3.length)

    data4_len = random_length(PAGE_SIZE)
    data4 = Data(data1.offset, data4_len, random_string(data4_len))
    snap4 = Snapshot(dev, data4, address)
    snap4.verify_data()
def test_expansion_with_rebuild(
        grpc_controller,  # NOQA
        grpc_replica1, grpc_replica2):  # NOQA
    """Expand a two-replica volume, then rebuild one replica and verify
    the rebuilt replica is expanded automatically and serves all data
    (original region, boundary-spanning writes, and the expanded region).

    Fix: the original assigned `address = grpc_controller.address` twice;
    the redundant second assignment has been removed.
    """
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 2
    assert replicas[0].mode == "RW"
    assert replicas[1].mode == "RW"

    # the default size is 4MB, will expand it to 8MB
    zero_char = b'\x00'.decode('utf-8')
    original_data = zero_char * SIZE

    # write the data to the original part then do expansion
    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)

    expand_volume_with_frontend(grpc_controller, EXPANDED_SIZE)
    wait_and_check_volume_expansion(grpc_controller, EXPANDED_SIZE)

    snap1.verify_data()
    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:]
    # expanded region starts out zero-filled
    assert dev.readat(SIZE, SIZE) == zero_char * SIZE

    # write the data to both the original part and the expanded part
    data2_len = random_length(PAGE_SIZE)
    data2 = Data(SIZE - PAGE_SIZE, data2_len, random_string(data2_len))
    snap2 = Snapshot(dev, data2, address)
    data3_len = random_length(PAGE_SIZE)
    data3 = Data(random.randrange(SIZE, EXPANDED_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data3_len, random_string(data3_len))
    snap3 = Snapshot(dev, data3, address)

    snap1.verify_data()
    snap2.verify_data()
    snap3.verify_data()
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPANDED_SIZE-data3.offset-data3.length)

    # Cleanup replica2
    cleanup_replica(grpc_replica2)
    verify_replica_state(grpc_controller, grpc_replica2.address, "ERR")
    grpc_controller.replica_delete(replicas[1].address)

    # Rebuild replica2.
    open_replica(grpc_replica2)
    # The newly opened replica2 will be expanded automatically
    cmd.add_replica(address, grpc_replica2.url)
    wait_for_rebuild_complete(address)
    verify_replica_state(grpc_controller, grpc_replica2.address, "RW")

    # Cleanup replica1 then check if the rebuilt replica2 works fine
    cleanup_replica(grpc_replica1)
    verify_replica_state(grpc_controller, grpc_replica1.address, "ERR")
    grpc_controller.replica_delete(replicas[0].address)

    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:data2.offset] + \
        data2.content + \
        original_data[data2.offset+data2.length:]
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPANDED_SIZE-data3.offset-data3.length)

    data4_len = random_length(PAGE_SIZE)
    data4 = Data(data1.offset, data4_len, random_string(data4_len))
    snap4 = Snapshot(dev, data4, address)
    snap4.verify_data()
def test_inc_restore_with_rebuild_and_expansion(grpc_controller,  # NOQA
                                                grpc_replica1,
                                                grpc_replica2,
                                                grpc_controller_no_frontend,
                                                grpc_fixed_dir_replica1,
                                                grpc_fixed_dir_replica2,
                                                backup_targets):  # NOQA
    """Incrementally restore backups into a DR volume across an expansion,
    verifying the DR volume must be expanded manually before restoring a
    larger backup and that a rebuilding DR replica needs a manual expand
    before `add-replica` succeeds."""
    # Pick up a random backup target.
    backup_target = backup_targets[random.randint(0, 1)]

    address = grpc_controller.address
    dr_address = grpc_controller_no_frontend.address

    # Best-effort cleanup of stale backups from previous runs.
    try:
        cmd.backup_volume_rm(address, VOLUME_NAME, backup_target)
    except Exception:
        pass

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
    start_no_frontend_volume(grpc_controller_no_frontend,
                             grpc_fixed_dir_replica1)

    # Backup 0: taken at the original size.
    data0_len = random_length(PAGE_SIZE)
    data0 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data0_len, random_string(data0_len))
    snap0 = Snapshot(dev, data0, address)
    backup0_info = create_backup(address, snap0.name, backup_target)
    assert backup0_info["VolumeName"] == VOLUME_NAME
    assert backup0_info["Size"] == str(BLOCK_SIZE)

    cmd.backup_restore(dr_address, backup0_info["URL"])
    wait_for_restore_completion(dr_address, backup0_info["URL"])
    verify_no_frontend_data(data0.offset, data0.content,
                            grpc_controller_no_frontend)

    expand_volume_with_frontend(grpc_controller, EXPANDED_SIZE)
    wait_and_check_volume_expansion(grpc_controller, EXPANDED_SIZE)

    # Backup 1: taken after expansion, data in the expanded region.
    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(SIZE, EXPANDED_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)
    backup1_info = create_backup(address, snap1.name, backup_target,
                                 EXPANDED_SIZE_STR)
    assert backup1_info["VolumeName"] == VOLUME_NAME
    assert backup1_info["Size"] == str(2 * BLOCK_SIZE)

    backup_volumes = cmd.backup_volume_list(address, VOLUME_NAME,
                                            backup_target)
    assert VOLUME_NAME in backup_volumes
    url = get_backup_volume_url(backup_target, VOLUME_NAME)
    backup_info = cmd.backup_inspect_volume(address, url)
    assert backup_info["Size"] == EXPANDED_SIZE_STR

    # restore command invocation should error out
    with pytest.raises(subprocess.CalledProcessError) as e:
        cmd.backup_restore(dr_address, backup1_info["URL"])
    assert "need to expand the DR volume" in e.value.stdout

    # The above restore error is triggered before calling the replicas.
    # Hence the error won't be recorded in the restore status
    # and we can continue restoring backups for the DR volume.
    rs = cmd.restore_status(dr_address)
    for status in rs.values():
        assert status['backupURL'] == backup0_info["URL"]
        assert status['lastRestored'] == backup0_info["Name"]
        assert 'error' not in status.keys()
        assert not status["isRestoring"]

    grpc_controller_no_frontend.volume_expand(EXPANDED_SIZE)
    wait_for_volume_expansion(grpc_controller_no_frontend, EXPANDED_SIZE)

    # This restore command will trigger snapshot purge.
    # And the error is triggered before calling the replicas.
    with pytest.raises(subprocess.CalledProcessError) as e:
        cmd.backup_restore(dr_address, backup1_info["URL"])
    assert "found more than 1 snapshot in the replicas, " \
           "hence started to purge snapshots before the restore" \
           in e.value.stdout
    wait_for_purge_completion(dr_address)

    # After the purge only the expansion snapshot and the volume head
    # remain.
    snaps_info = cmd.snapshot_info(dr_address)
    assert len(snaps_info) == 2
    volume_head_name = "volume-head"
    snap_name = "expand-" + EXPANDED_SIZE_STR
    head_info = snaps_info[volume_head_name]
    assert head_info["name"] == volume_head_name
    assert head_info["parent"] == snap_name
    assert not head_info["children"]
    assert head_info["usercreated"] is False
    snap_info = snaps_info[snap_name]
    assert snap_info["name"] == snap_name
    assert not snap_info["parent"]
    assert volume_head_name in snap_info["children"]
    assert snap_info["usercreated"] is False

    cmd.backup_restore(dr_address, backup1_info["URL"])
    wait_for_restore_completion(dr_address, backup1_info["URL"])
    verify_no_frontend_data(data1.offset, data1.content,
                            grpc_controller_no_frontend)

    # For DR volume, the rebuilding replica won't be expanded automatically.
    open_replica(grpc_fixed_dir_replica2)
    with pytest.raises(subprocess.CalledProcessError):
        cmd.add_replica(dr_address, grpc_fixed_dir_replica2.url, True)

    # Manually expand the rebuilding replica then retry `add-replica`.
    grpc_fixed_dir_replica2.replica_open()
    grpc_fixed_dir_replica2.replica_expand(EXPANDED_SIZE)
    grpc_fixed_dir_replica2.replica_close()
    cmd.add_replica(dr_address, grpc_fixed_dir_replica2.url, True)

    replicas = grpc_controller_no_frontend.replica_list()
    assert len(replicas) == 2
    rw_replica, wo_replica = 0, 0
    for r in replicas:
        if r.mode == 'RW':
            rw_replica += 1
        else:
            assert r.mode == "WO"
            wo_replica += 1
    assert rw_replica == 1 and wo_replica == 1

    # The old replica will fail the restore but the error won't be recorded.
    # Then rebuilding replica will start full restore.
    with pytest.raises(subprocess.CalledProcessError) as e:
        cmd.backup_restore(dr_address, backup1_info["URL"])
    assert "already restored backup" in e.value.stdout
    wait_for_restore_completion(dr_address, backup1_info["URL"])

    cmd.verify_rebuild_replica(dr_address, grpc_fixed_dir_replica2.url)
    replicas = grpc_controller_no_frontend.replica_list()
    assert len(replicas) == 2
    for r in replicas:
        assert r.mode == 'RW'

    verify_no_frontend_data(data1.offset, data1.content,
                            grpc_controller_no_frontend)

    cmd.backup_volume_rm(grpc_controller.address,
                         VOLUME_NAME, backup_target)
def volume_expansion_with_snapshots_test(dev, grpc_controller,  # NOQA
                                         volume_name, engine_name,
                                         original_data):
    """Shared helper: expand a volume, exercise reads/writes across the
    original and expanded regions, snapshot-revert, then delete/purge a
    snapshot and verify data integrity throughout."""
    # the default size is 4MB, will expand it to 8MB
    address = grpc_controller.address
    zero_char = b'\x00'.decode('utf-8')

    # write the data to the original part then do expansion
    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(0, SIZE-2*PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)

    expand_volume_with_frontend(grpc_controller, EXPANDED_SIZE)
    wait_and_check_volume_expansion(
        grpc_controller, EXPANDED_SIZE)
    snap1.verify_data()
    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:]
    # the expanded part starts out zero-filled
    assert dev.readat(SIZE, SIZE) == zero_char*SIZE

    # write the data to both the original part and the expanded part
    data2_len = random_length(PAGE_SIZE)
    data2 = Data(SIZE-PAGE_SIZE, data2_len, random_string(data2_len))
    snap2 = Snapshot(dev, data2, address)
    data3_len = random_length(PAGE_SIZE)
    data3 = Data(random.randrange(SIZE, EXPANDED_SIZE-PAGE_SIZE, PAGE_SIZE),
                 data3_len, random_string(data3_len))
    snap3 = Snapshot(dev, data3, address)
    snap1.verify_data()
    snap2.verify_data()
    snap3.verify_data()
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPANDED_SIZE-data3.offset-data3.length)
    data4_len = random_length(PAGE_SIZE)
    data4 = Data(data1.offset, data4_len, random_string(data4_len))
    snap4 = Snapshot(dev, data4, address)
    snap4.verify_data()

    # revert to snap1 then see if we can still r/w the existing data
    # and expanded part
    snapshot_revert_with_frontend(address, engine_name, snap1.name)
    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:]
    assert dev.readat(SIZE, SIZE) == zero_char*SIZE
    data5_len = random_length(PAGE_SIZE)
    data5 = Data(random.randrange(SIZE, EXPANDED_SIZE-PAGE_SIZE, PAGE_SIZE),
                 data5_len, random_string(data5_len))
    snap5 = Snapshot(dev, data5, address)
    snap5.verify_data()
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data5.offset-SIZE) + \
        data5.content + zero_char*(EXPANDED_SIZE-data5.offset-data5.length)

    # delete and purge the snap1. it will coalesce with the larger snap2
    cmd.snapshot_rm(address, snap1.name)
    cmd.snapshot_purge(address)
    wait_for_purge_completion(address)
    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:]
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data5.offset-SIZE) + \
        data5.content + zero_char*(EXPANDED_SIZE-data5.offset-data5.length)
def volume_expansion_with_backup_test(
        grpc_engine_manager,  # NOQA
        grpc_controller,  # NOQA
        grpc_dr_controller,  # NOQA
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_dr_replica1,  # NOQA
        grpc_dr_replica2,  # NOQA
        volume_name, engine_name, backup_target):  # NOQA
    """Shared helper: back up a volume before and after expansion, then
    incrementally restore into a DR volume and verify the DR volume is
    implicitly expanded by the incremental restore."""
    address = grpc_controller.address
    dr_address = grpc_dr_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
    start_no_frontend_volume(grpc_engine_manager,
                             grpc_dr_controller,
                             grpc_dr_replica1,
                             grpc_dr_replica2)

    # Best-effort cleanup of stale backups from previous runs.
    try:
        cmd.backup_volume_rm(address, volume_name, backup_target)
    except Exception:
        pass

    # Backup 0: taken at the original size.
    data0_len = random_length(PAGE_SIZE)
    data0 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data0_len, random_string(data0_len))
    snap0 = Snapshot(dev, data0, address)
    backup0_info = create_backup(address, snap0.name, backup_target)
    assert backup0_info["VolumeName"] == volume_name
    assert backup0_info["Size"] == str(BLOCK_SIZE)

    cmd.backup_restore(dr_address, backup0_info["URL"])
    wait_for_restore_completion(dr_address, backup0_info["URL"])
    verify_no_frontend_data(grpc_engine_manager,
                            data0.offset, data0.content,
                            grpc_dr_controller)

    expand_volume_with_frontend(grpc_controller, EXPANDED_SIZE)
    wait_and_check_volume_expansion(grpc_controller, EXPANDED_SIZE)

    # Backup 1: taken after expansion, data in the expanded region.
    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(SIZE, EXPANDED_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)
    backup1_info = create_backup(address, snap1.name, backup_target,
                                 EXPANDED_SIZE_STR)
    assert backup1_info["VolumeName"] == volume_name
    assert backup1_info["Size"] == str(2 * BLOCK_SIZE)

    backup_volumes = cmd.backup_volume_list(address, volume_name,
                                            backup_target)
    assert volume_name in backup_volumes
    assert backup_volumes[volume_name]["Size"] == EXPANDED_SIZE_STR

    # incremental restoration will implicitly expand the volume first
    restore_incrementally(dr_address, backup1_info["URL"],
                          backup0_info["Name"])
    check_dr_volume_block_device_size(grpc_engine_manager,
                                      grpc_dr_controller, EXPANDED_SIZE)
    verify_no_frontend_data(grpc_engine_manager,
                            data1.offset, data1.content,
                            grpc_dr_controller)

    cmd.backup_volume_rm(grpc_controller.address,
                         volume_name, backup_target)