def test_snapshot_revert(
        grpc_controller,  # NOQA
        grpc_replica1, grpc_replica2):  # NOQA
    """Exercise the generic snapshot-revert scenario on a freshly built
    two-replica volume."""
    controller_addr = grpc_controller.address
    device = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
    snapshot_revert_test(device, controller_addr, ENGINE_NAME)
def test_rw_with_metric(
        grpc_controller,  # NOQA
        grpc_replica1, grpc_replica2):  # NOQA
    """Write/read random page-aligned data and verify the controller's
    metric stream eventually reports non-zero read/write bandwidth and IOPS.
    """
    rw_dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    replies = grpc_controller.metric_get()
    # skip the first metric since its fields are 0
    next(replies).metric

    for i in range(0, 5):
        base = random.randint(1, SIZE - PAGE_SIZE)
        offset = (base // PAGE_SIZE) * PAGE_SIZE
        # FIX: when `base` lands exactly on a page boundary the original
        # `length = base - offset` was 0, so random_string/verify_data did
        # no I/O and the metric-wait loop below could spin forever waiting
        # for non-zero bandwidth. Guarantee at least one byte is written.
        length = max(base - offset, 1)
        data = random_string(length)
        verify_data(rw_dev, offset, data)

        # poll the metric stream until the I/O above is reflected;
        # NOTE(review): assumes the gRPC stream keeps yielding entries and
        # raises StopIteration only transiently — confirm against the
        # metric_get server implementation.
        while 1:
            try:
                metric = next(replies).metric
                # it's hard to confirm the accurate value of metric
                assert metric.readBandwidth != 0
                assert metric.writeBandwidth != 0
                assert metric.iOPS != 0
                break
            except StopIteration:
                time.sleep(1)
def test_backup_volume_deletion(
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_controller, backup_targets):  # NOQA
    """For each backup target: create one backup, delete the whole backup
    volume, and confirm a later listing reports a lookup error."""
    write_offset = 0
    write_length = 128
    address = grpc_controller.address

    for target in backup_targets:
        device = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
        payload = random_string(write_length)
        verify_data(device, write_offset, payload)

        snap_name = cmd.snapshot_create(address)
        backup_info = create_backup(address, snap_name, target)
        assert backup_info["VolumeName"] == VOLUME_NAME
        assert backup_info["Size"] == BLOCK_SIZE_STR
        assert snap_name in backup_info["SnapshotName"]

        # removing the backup volume must make subsequent listings fail
        cmd.backup_volume_rm(address, VOLUME_NAME, target)
        info = cmd.backup_volume_list(address, VOLUME_NAME, target)
        assert "cannot find" in info[VOLUME_NAME]["Messages"]["error"]

        cmd.sync_agent_server_reset(address)
        cleanup_controller(grpc_controller)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
def snapshot_tree_backup_test(
        backup_target, engine_name,  # NOQA
        grpc_controller, grpc_replica1, grpc_replica2):  # NOQA
    """Build a snapshot tree, back up a set of its nodes, then verify each
    backed-up node restores correctly."""
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    offset = 0
    length = 128
    backup = {}

    snap, data = snapshot_tree_build(dev, address, engine_name,
                                     offset, length)

    # same nodes, same order as the original hand-unrolled calls
    backed_up_nodes = ("0b", "0c", "1c", "2b", "2c", "3c")
    for node in backed_up_nodes:
        backup[node] = create_backup(address, snap[node],
                                     backup_target)["URL"]

    for node in backed_up_nodes:
        snapshot_tree_verify_backup_node(dev, address, engine_name,
                                         offset, length, backup, data, node)
def test_expansion_without_backing_file(
        grpc_controller,  # NOQA
        grpc_replica1, grpc_replica2):  # NOQA
    """Run the expansion-with-snapshots scenario on a volume that has no
    backing file; the expected initial content is all zeroes."""
    zero_fill = b'\x00'.decode('utf-8')
    device = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
    volume_expansion_with_snapshots_test(device, grpc_controller,
                                         VOLUME_NAME, ENGINE_NAME,
                                         zero_fill * SIZE)
def test_backup(
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_controller, backup_targets):  # NOQA
    """Run the generic backup scenario once per configured backup target,
    rebuilding a clean volume before each run."""
    for target in backup_targets:
        device = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
        backup_test(device, grpc_controller.address,
                    VOLUME_NAME, ENGINE_NAME, target)
        # reset and tear everything down so the next target starts clean
        cmd.sync_agent_server_reset(grpc_controller.address)
        cleanup_controller(grpc_controller)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
def test_snapshot_rm_basic(
        grpc_controller,  # NOQA
        grpc_replica1, grpc_replica2):  # NOQA
    """Create three snapshots, delete the middle one, and verify the
    remaining chain still serves the correct data before and after a
    revert to the first snapshot.
    """
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    existings = {}

    snap1 = Snapshot(dev, generate_random_data(existings), address)
    snap2 = Snapshot(dev, generate_random_data(existings), address)
    snap3 = Snapshot(dev, generate_random_data(existings), address)

    # three snapshots plus the volume head
    info = cmd.snapshot_info(address)
    assert len(info) == 4
    assert VOLUME_HEAD in info
    assert snap1.name in info
    assert snap2.name in info
    assert snap3.name in info

    # remove the middle snapshot; purge coalesces it away
    cmd.snapshot_rm(address, snap2.name)
    cmd.snapshot_purge(address)
    wait_for_purge_completion(address)

    info = cmd.snapshot_info(address)
    assert len(info) == 3
    assert snap1.name in info
    assert snap3.name in info

    snap3.verify_checksum()
    # snap2's data must survive the merge even though the snapshot is gone
    snap2.verify_data()
    snap1.verify_data()

    # after reverting to snap1, writes from snap2/snap3 must be gone
    snapshot_revert_with_frontend(address, ENGINE_NAME, snap1.name)
    snap3.refute_data()
    snap2.refute_data()
    snap1.verify_checksum()
def test_expansion_with_rebuild(
        grpc_controller,  # NOQA
        grpc_replica1, grpc_replica2):  # NOQA
    """Expand a volume, then rebuild each replica in turn, verifying that
    data written before and after the expansion survives both rebuilds.
    """
    address = grpc_controller.address
    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 2
    assert replicas[0].mode == "RW"
    assert replicas[1].mode == "RW"

    # the default size is 4MB, will expand it to 8MB
    # NOTE(review): this re-assignment of `address` is redundant — it was
    # already set above to the same value.
    address = grpc_controller.address
    zero_char = b'\x00'.decode('utf-8')
    original_data = zero_char * SIZE

    # write the data to the original part then do expansion
    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)

    grpc_controller.volume_expand(EXPAND_SIZE)
    wait_for_volume_expansion(grpc_controller, EXPAND_SIZE)
    check_block_device_size(VOLUME_NAME, EXPAND_SIZE)

    snap1.verify_data()
    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:]
    # the newly expanded tail must read back as zeroes
    assert dev.readat(SIZE, SIZE) == zero_char * SIZE

    # write the data to both the original part and the expanded part
    data2_len = random_length(PAGE_SIZE)
    data2 = Data(SIZE - PAGE_SIZE, data2_len, random_string(data2_len))
    snap2 = Snapshot(dev, data2, address)
    data3_len = random_length(PAGE_SIZE)
    data3 = Data(random.randrange(SIZE, EXPAND_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data3_len, random_string(data3_len))
    snap3 = Snapshot(dev, data3, address)
    snap1.verify_data()
    snap2.verify_data()
    snap3.verify_data()
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPAND_SIZE-data3.offset-data3.length)

    # Cleanup replica2
    cleanup_replica(grpc_replica2)
    verify_replica_state(grpc_controller, 1, "ERR")
    grpc_controller.replica_delete(replicas[1].address)

    # Rebuild replica2.
    open_replica(grpc_replica2)
    # The newly opened replica2 will be expanded automatically
    cmd.add_replica(address, grpc_replica2.url)
    wait_for_rebuild_complete(address)
    verify_replica_state(grpc_controller, 1, "RW")

    # Cleanup replica1 then check if the rebuilt replica2 works fine
    cleanup_replica(grpc_replica1)
    verify_replica_state(grpc_controller, 0, "ERR")
    grpc_controller.replica_delete(replicas[0].address)

    # all reads now come from the rebuilt replica2
    assert \
        dev.readat(0, SIZE) == \
        original_data[0:data1.offset] + data1.content + \
        original_data[data1.offset+data1.length:data2.offset] + \
        data2.content + \
        original_data[data2.offset+data2.length:]
    assert \
        dev.readat(SIZE, SIZE) == zero_char*(data3.offset-SIZE) + \
        data3.content + zero_char*(EXPAND_SIZE-data3.offset-data3.length)

    # a fresh write/snapshot on the degraded (single-replica) volume works
    data4_len = random_length(PAGE_SIZE)
    data4 = Data(data1.offset, data4_len, random_string(data4_len))
    snap4 = Snapshot(dev, data4, address)
    snap4.verify_data()
def dev(request, grpc_replica_client, grpc_controller_client):
    """Fixture: build a two-replica volume and return its block device."""
    replica_one = grpc_replica_client(REPLICA_NAME + "-1")
    replica_two = grpc_replica_client(REPLICA_NAME + "-2")
    controller = grpc_controller_client(ENGINE_NAME, VOLUME_NAME)
    return get_dev(replica_one, replica_two, controller)
def test_upgrade(
        grpc_engine_manager,  # NOQA
        grpc_controller,  # NOQA
        grpc_fixed_dir_replica1, grpc_fixed_dir_replica2,  # NOQA
        grpc_extra_replica1, grpc_extra_replica2):  # NOQA
    """Live-upgrade the engine binary to the upgrade build and back,
    verifying the volume data stays intact and that invalid upgrade
    requests (same binary, bogus replica) are rejected.
    """
    dev = get_dev(grpc_fixed_dir_replica1,
                  grpc_fixed_dir_replica2,
                  grpc_controller)

    offset = 0
    length = 128

    data = random_string(length)
    verify_data(dev, offset, data)

    # both set pointed to the same volume underlying
    r1_url = grpc_fixed_dir_replica1.url
    r2_url = grpc_fixed_dir_replica2.url
    upgrade_r1_url = grpc_extra_replica1.url
    upgrade_r2_url = grpc_extra_replica2.url

    v = grpc_controller.volume_start(replicas=[r1_url, r2_url])
    assert v.replicaCount == 2

    # upgrade to the alternate binary using the extra replica set
    upgrade_e = grpc_engine_manager.engine_upgrade(
        ENGINE_NAME, LONGHORN_UPGRADE_BINARY, SIZE,
        [upgrade_r1_url, upgrade_r2_url])
    assert upgrade_e.spec.binary == LONGHORN_UPGRADE_BINARY

    verify_data(dev, offset, data)

    grpc_controller.client_upgrade(upgrade_e.spec.listen)
    wait_for_process_running(grpc_engine_manager, ENGINE_NAME,
                             INSTANCE_MANAGER_TYPE_ENGINE)

    # cannot start with same binary
    with pytest.raises(grpc.RpcError):
        grpc_engine_manager.engine_upgrade(ENGINE_NAME,
                                           LONGHORN_UPGRADE_BINARY,
                                           SIZE, [r1_url, r2_url])

    verify_data(dev, offset, data)

    # cannot start with wrong replica, would trigger rollback
    with pytest.raises(grpc.RpcError):
        grpc_engine_manager.engine_upgrade(ENGINE_NAME,
                                           LONGHORN_UPGRADE_BINARY,
                                           SIZE, ["random"])

    verify_data(dev, offset, data)

    # reopen the original replicas so we can downgrade back onto them
    grpc_fixed_dir_replica1 = cleanup_replica(grpc_fixed_dir_replica1)
    grpc_fixed_dir_replica2 = cleanup_replica(grpc_fixed_dir_replica2)
    open_replica(grpc_fixed_dir_replica1)
    open_replica(grpc_fixed_dir_replica2)

    # downgrade back to the original binary
    e = grpc_engine_manager.engine_upgrade(ENGINE_NAME,
                                           LONGHORN_BINARY,
                                           SIZE, [r1_url, r2_url])
    assert e.spec.binary == LONGHORN_BINARY

    verify_data(dev, offset, data)

    grpc_controller.client_upgrade(e.spec.listen)
    wait_for_process_running(grpc_engine_manager, ENGINE_NAME,
                             INSTANCE_MANAGER_TYPE_ENGINE)
def restore_inc_test(
        grpc_engine_manager,  # NOQA
        grpc_controller,  # NOQA
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_dr_controller,  # NOQA
        grpc_dr_replica1, grpc_dr_replica2,  # NOQA
        backup_target):  # NOQA
    """Incremental-restore scenario: take five backups of a source volume,
    restore them one by one onto a DR (no-frontend) volume, and verify the
    DR content after each step — including a mocked mid-restore failure and
    two mocked race conditions.
    """
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    zero_string = b'\x00'.decode('utf-8')

    # backup0: 256 random data in 1st block
    length0 = 256
    snap0_data = random_string(length0)
    verify_data(dev, 0, snap0_data)
    verify_data(dev, BLOCK_SIZE, snap0_data)
    snap0 = cmd.snapshot_create(address)
    backup0 = create_backup(address, snap0, backup_target)["URL"]
    backup0_name = cmd.backup_inspect(address, backup0)['Name']

    # backup1: 32 random data + 32 zero data + 192 random data in 1st block
    length1 = 32
    offset1 = 32
    snap1_data = zero_string * length1
    verify_data(dev, offset1, snap1_data)
    snap1 = cmd.snapshot_create(address)
    backup1 = create_backup(address, snap1, backup_target)["URL"]
    backup1_name = cmd.backup_inspect(address, backup1)['Name']

    # backup2: 32 random data + 256 random data in 1st block,
    # 256 random data in 2nd block
    length2 = 256
    offset2 = 32
    snap2_data = random_string(length2)
    verify_data(dev, offset2, snap2_data)
    verify_data(dev, BLOCK_SIZE, snap2_data)
    snap2 = cmd.snapshot_create(address)
    backup2 = create_backup(address, snap2, backup_target)["URL"]
    backup2_name = cmd.backup_inspect(address, backup2)['Name']

    # backup3: 64 zero data + 192 random data in 1st block
    length3 = 64
    offset3 = 0
    verify_data(dev, offset3, zero_string * length3)
    verify_data(dev, length2, zero_string * offset2)
    verify_data(dev, BLOCK_SIZE, zero_string * length2)
    snap3 = cmd.snapshot_create(address)
    backup3 = create_backup(address, snap3, backup_target)["URL"]
    backup3_name = cmd.backup_inspect(address, backup3)['Name']

    # backup4: 256 random data in 1st block
    length4 = 256
    offset4 = 0
    snap4_data = random_string(length4)
    verify_data(dev, offset4, snap4_data)
    snap4 = cmd.snapshot_create(address)
    backup4 = create_backup(address, snap4, backup_target)["URL"]
    backup4_name = cmd.backup_inspect(address, backup4)['Name']

    # start dr volume (no frontend)
    dr_address = grpc_dr_controller.address
    start_no_frontend_volume(grpc_engine_manager,
                             grpc_dr_controller,
                             grpc_dr_replica1, grpc_dr_replica2)

    # full restore of backup0, then verify DR content
    cmd.backup_restore(dr_address, backup0)
    wait_for_restore_completion(dr_address, backup0)
    verify_no_frontend_data(grpc_engine_manager,
                            0, snap0_data, grpc_dr_controller)

    # mock restore crash/error — only possible for vfs targets, where the
    # backing .blk files can be temporarily renamed away
    delta_file1 = "volume-delta-" + backup0_name + ".img"
    if "vfs" in backup_target:
        command = ["find", VFS_DIR, "-type", "d", "-name", VOLUME_NAME]
        backup_volume_path = subprocess.check_output(command).strip()
        command = ["find", backup_volume_path, "-name", "*blk"]
        blocks = subprocess.check_output(command).split()
        assert len(blocks) != 0
        for blk in blocks:
            command = ["mv", blk, blk + ".tmp".encode('utf-8')]
            subprocess.check_output(command).strip()

        # should fail: the data blocks are gone
        is_failed = False
        cmd.restore_inc(dr_address, backup1, backup0_name)
        for i in range(RETRY_COUNTS):
            rs = cmd.restore_status(dr_address)
            for status in rs.values():
                if status['backupURL'] != backup1:
                    break
                if 'error' in status.keys():
                    if status['error'] != "":
                        assert 'no such file or directory' in \
                            status['error']
                        is_failed = True
            if is_failed:
                break
            time.sleep(RETRY_INTERVAL)
        assert is_failed

        # the failed restore leaves its delta files behind
        assert path.exists(FIXED_REPLICA_PATH1 + delta_file1)
        assert path.exists(FIXED_REPLICA_PATH2 + delta_file1)

        # restore the renamed blocks so the retry below can succeed
        for blk in blocks:
            command = ["mv", blk + ".tmp".encode('utf-8'), blk]
            subprocess.check_output(command)

    data1 = \
        snap0_data[0:offset1] + snap1_data + \
        snap0_data[offset1+length1:]

    # race condition: last restoration has failed
    # but `isRestoring` hasn't been cleaned up yet — retry until it clears
    for i in range(RETRY_COUNTS):
        try:
            restore_incrementally(dr_address, backup1, backup0_name)
            break
        except subprocess.CalledProcessError as e:
            # NOTE(review): assumes e.output is str here — confirm the cmd
            # wrapper decodes subprocess output
            if "already in progress" not in e.output:
                time.sleep(RETRY_INTERVAL)
            else:
                raise e

    verify_no_frontend_data(grpc_engine_manager,
                            0, data1, grpc_dr_controller)

    # a successful incremental restore must clean up its delta files
    assert not path.exists(FIXED_REPLICA_PATH1 + delta_file1)
    assert not path.exists(FIXED_REPLICA_PATH2 + delta_file1)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup1_name)

    data2 = \
        data1[0:offset2] + snap2_data + \
        zero_string * (BLOCK_SIZE - length2 - offset2) + snap2_data
    restore_incrementally(dr_address, backup2, backup1_name)
    verify_no_frontend_data(grpc_engine_manager,
                            0, data2, grpc_dr_controller)
    delta_file2 = "volume-delta-" + backup1_name + ".img"
    assert not path.exists(FIXED_REPLICA_PATH1 + delta_file2)
    assert not path.exists(FIXED_REPLICA_PATH2 + delta_file2)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup2_name)

    # mock race condition: restoring an older backup must be rejected
    with pytest.raises(subprocess.CalledProcessError) as e:
        restore_incrementally(dr_address, backup1, backup0_name)
    # FIX: the original did `assert "..." in e`, which tests membership on
    # the pytest ExceptionInfo object itself (and never the message);
    # check the captured exception's text after the `with` block instead.
    assert "doesn't match lastRestored" in str(e.value)

    data3 = zero_string * length3 + data2[length3:length2]
    restore_incrementally(dr_address, backup3, backup2_name)
    verify_no_frontend_data(grpc_engine_manager,
                            0, data3, grpc_dr_controller)
    delta_file3 = "volume-delta-" + backup3_name + ".img"
    assert not path.exists(FIXED_REPLICA_PATH1 + delta_file3)
    assert not path.exists(FIXED_REPLICA_PATH2 + delta_file3)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup3_name)

    # mock corner case: invalid last-restored backup
    rm_backups(address, ENGINE_NAME, [backup3])
    # actually it is full restoration
    restore_incrementally(dr_address, backup4, backup3_name)
    verify_no_frontend_data(grpc_engine_manager,
                            0, snap4_data, grpc_dr_controller)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup4_name)

    # no temporary files may be left behind in the backup store
    if "vfs" in backup_target:
        command = ["find", VFS_DIR, "-type", "d", "-name", VOLUME_NAME]
        backup_volume_path = subprocess.check_output(command).strip()
        command = ["find", backup_volume_path, "-name", "*tempoary"]
        tmp_files = subprocess.check_output(command).split()
        assert len(tmp_files) == 0

    cleanup_no_frontend_volume(grpc_engine_manager,
                               grpc_dr_controller,
                               grpc_dr_replica1, grpc_dr_replica2)
    rm_backups(address, ENGINE_NAME, [backup0, backup1, backup2, backup4])
    cmd.sync_agent_server_reset(address)
    cleanup_controller(grpc_controller)
    cleanup_replica(grpc_replica1)
    cleanup_replica(grpc_replica2)
def volume_expansion_with_backup_test(
        grpc_engine_manager,  # NOQA
        grpc_controller,  # NOQA
        grpc_dr_controller,  # NOQA
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_dr_replica1,  # NOQA
        grpc_dr_replica2,  # NOQA
        volume_name, engine_name, backup_target):  # NOQA
    """Back up a volume, expand it, back it up again, and verify that an
    incremental restore onto the DR volume implicitly expands the DR
    volume to the new size.
    """
    address = grpc_controller.address
    dr_address = grpc_dr_controller.address
    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
    start_no_frontend_volume(grpc_engine_manager,
                             grpc_dr_controller,
                             grpc_dr_replica1, grpc_dr_replica2)

    # best-effort removal of leftovers from previous runs
    try:
        cmd.backup_volume_rm(address, volume_name, backup_target)
    except Exception:
        pass

    # backup0: one random page inside the original 4MB range
    data0_len = random_length(PAGE_SIZE)
    data0 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data0_len, random_string(data0_len))
    snap0 = Snapshot(dev, data0, address)
    backup0_info = create_backup(address, snap0.name, backup_target)
    assert backup0_info["VolumeName"] == volume_name
    assert backup0_info["Size"] == str(BLOCK_SIZE)

    cmd.backup_restore(dr_address, backup0_info["URL"])
    wait_for_restore_completion(dr_address, backup0_info["URL"])
    verify_no_frontend_data(grpc_engine_manager,
                            data0.offset, data0.content,
                            grpc_dr_controller)

    grpc_controller.volume_expand(EXPAND_SIZE)
    wait_for_volume_expansion(grpc_controller, EXPAND_SIZE)
    check_block_device_size(volume_name, EXPAND_SIZE)

    # backup1: data written into the newly expanded region, backed up with
    # the expanded volume size
    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(SIZE, EXPAND_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)
    backup1_info = create_backup(address, snap1.name, backup_target,
                                 EXPAND_SIZE_STR)
    assert backup1_info["VolumeName"] == volume_name
    assert backup1_info["Size"] == str(2 * BLOCK_SIZE)

    # the backup volume must now report the expanded size
    backup_volumes = cmd.backup_volume_list(address, volume_name,
                                            backup_target)
    assert volume_name in backup_volumes
    assert backup_volumes[volume_name]["Size"] == EXPAND_SIZE_STR

    # incremental restoration will implicitly expand the volume first
    restore_incrementally(dr_address,
                          backup1_info["URL"], backup0_info["Name"])
    check_dr_volume_block_device_size(grpc_engine_manager,
                                      grpc_dr_controller, EXPAND_SIZE)
    verify_no_frontend_data(grpc_engine_manager,
                            data1.offset, data1.content,
                            grpc_dr_controller)

    cmd.backup_volume_rm(grpc_controller.address,
                         volume_name, backup_target)
def test_snapshot_tree_basic(
        grpc_controller,  # NOQA
        grpc_replica1, grpc_replica2):  # NOQA
    """Build a snapshot tree, revert to node 1b, mark the other branches
    removed, purge, and verify the surviving tree topology and data.
    """
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    offset = 0
    length = 128

    snap, data = snapshot_tree_build(dev, address, ENGINE_NAME,
                                     offset, length)

    snapshot_revert_with_frontend(address, ENGINE_NAME, snap["1b"])

    # mark everything outside the 0c / 1a->1b chain as removed; nodes that
    # still have multiple children (0b) survive the purge in removed state
    cmd.snapshot_rm(address, snap["0a"])
    cmd.snapshot_rm(address, snap["0b"])
    cmd.snapshot_rm(address, snap["1c"])
    cmd.snapshot_rm(address, snap["2a"])
    cmd.snapshot_rm(address, snap["2b"])
    cmd.snapshot_rm(address, snap["2c"])
    cmd.snapshot_rm(address, snap["3a"])
    cmd.snapshot_rm(address, snap["3b"])
    cmd.snapshot_rm(address, snap["3c"])
    cmd.snapshot_purge(address)
    wait_for_purge_completion(address)

    # the result should looks like this
    # snap["0b"](r) -> snap["0c"]
    #              \-> snap["1a"] -> snap["1b"] -> head
    info = cmd.snapshot_info(address)
    assert len(info) == 5
    assert snap["0b"] in info
    assert info[snap["0b"]]["parent"] == ""
    assert len(info[snap["0b"]]["children"]) == 2
    assert snap["0c"] in info[snap["0b"]]["children"]
    assert snap["1a"] in info[snap["0b"]]["children"]
    assert info[snap["0b"]]["removed"] is True
    assert snap["0c"] in info
    assert info[snap["0c"]]["parent"] == snap["0b"]
    assert not info[snap["0c"]]["children"]
    assert snap["1a"] in info
    assert info[snap["1a"]]["parent"] == snap["0b"]
    assert snap["1b"] in info[snap["1a"]]["children"]
    assert snap["1b"] in info
    assert info[snap["1b"]]["parent"] == snap["1a"]
    assert VOLUME_HEAD in info[snap["1b"]]["children"]
    assert VOLUME_HEAD in info
    assert info[VOLUME_HEAD]["parent"] == snap["1b"]

    # each surviving node must still serve the data captured when it was
    # taken
    snapshot_tree_verify_node(dev, address, ENGINE_NAME,
                              offset, length, snap, data, "0b")
    snapshot_tree_verify_node(dev, address, ENGINE_NAME,
                              offset, length, snap, data, "0c")
    snapshot_tree_verify_node(dev, address, ENGINE_NAME,
                              offset, length, snap, data, "1a")
    snapshot_tree_verify_node(dev, address, ENGINE_NAME,
                              offset, length, snap, data, "1b")
def test_snapshot_rm_rolling(
        grpc_controller,  # NOQA
        grpc_replica1, grpc_replica2):  # NOQA
    """Repeatedly remove snapshots and verify purge semantics: a removed
    snapshot that is the volume head's parent stays (marked removed) until
    a newer snapshot allows it to be coalesced away.
    """
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    existings = {}

    snap1 = Snapshot(dev, generate_random_data(existings), address)
    snapList = cmd.snapshot_ls(address)
    assert snap1.name in snapList

    cmd.snapshot_rm(address, snap1.name)
    # cannot do anything because it's the parent of volume head
    cmd.snapshot_purge(address)
    wait_for_purge_completion(address)

    snap2 = Snapshot(dev, generate_random_data(existings), address)

    # snap1 is still present but flagged removed; snap2 is live
    info = cmd.snapshot_info(address)
    assert len(info) == 3
    assert snap1.name in info
    assert snap2.name in info
    assert info[snap1.name]["removed"] is True
    assert info[snap2.name]["removed"] is False

    cmd.snapshot_rm(address, snap2.name)
    # this should trigger the deletion of snap1
    cmd.snapshot_purge(address)
    wait_for_purge_completion(address)

    snap2.verify_checksum()
    snap1.verify_data()

    snap3 = Snapshot(dev, generate_random_data(existings), address)
    snap4 = Snapshot(dev, generate_random_data(existings), address)
    snap5 = Snapshot(dev, generate_random_data(existings), address)

    snapList = cmd.snapshot_ls(address)
    assert snap1.name not in snapList
    assert snap2.name not in snapList
    assert snap3.name in snapList
    assert snap4.name in snapList
    assert snap5.name in snapList

    info = cmd.snapshot_info(address)
    assert len(info) == 5
    assert snap1.name not in info
    assert snap2.name in info
    assert snap3.name in info
    assert snap4.name in info
    assert snap5.name in info
    assert info[snap2.name]["removed"] is True

    cmd.snapshot_rm(address, snap3.name)
    cmd.snapshot_rm(address, snap4.name)
    cmd.snapshot_rm(address, snap5.name)
    # this should trigger the deletion of snap2 - snap4
    # and snap5 marked as removed
    cmd.snapshot_purge(address)
    wait_for_purge_completion(address)

    info = cmd.snapshot_info(address)
    assert len(info) == 2
    assert snap1.name not in info
    assert snap2.name not in info
    assert snap3.name not in info
    assert snap4.name not in info
    assert snap5.name in info
    assert info[snap5.name]["removed"] is True

    # data from every coalesced snapshot must still be readable
    snap5.verify_checksum()
    snap4.verify_data()
    snap3.verify_data()
    snap2.verify_data()
    snap1.verify_data()
def restore_to_file_without_backing_file_test(
        backup_target,  # NOQA
        grpc_controller,  # NOQA
        grpc_replica1,  # NOQA
        grpc_replica2):  # NOQA
    """Restore backups to raw and qcow2 image files (no backing file) and
    verify the file contents for single-snapshot and layered-snapshot
    backups.
    """
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    length0 = 256
    length1 = 128
    offset0 = 0
    offset1 = length1 + offset0

    output_raw_path = file(OUTPUT_FILE_RAW)
    output_qcow2_path = file(OUTPUT_FILE_QCOW2)

    # create 1 empty snapshot for converting to init state.
    snap0 = cmd.snapshot_create(address)

    # scenario 1: a single snapshot with 256B data.
    snap1_data = random_string(length0)
    verify_data(dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create(address)
    backup = create_backup(address, snap1, backup_target)["URL"]

    cmd.restore_to_file(address, backup, "",
                        output_raw_path, IMAGE_FORMAT_RAW)
    output1_raw = read_file(output_raw_path, offset0, length0)
    assert output1_raw == snap1_data
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(address, backup, "",
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output1_qcow2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length0)
    assert output1_qcow2.decode('utf-8') == snap1_data
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    snapshot_revert_with_frontend(address, ENGINE_NAME, snap0)
    rm_snaps(address, [snap1])
    rm_backups(address, ENGINE_NAME, [backup])

    # scenario 2: two layered snapshots — 256B (snap1) overwritten by
    # 128B (snap2); the restored image must be
    #   snap2(offset0, length1) + snap1(offset1, length0 - length1)
    snap1_data = random_string(length0)
    verify_data(dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create(address)
    snap2_data = random_string(length1)
    verify_data(dev, offset0, snap2_data)
    snap2 = cmd.snapshot_create(address)
    backup = create_backup(address, snap2, backup_target)["URL"]

    cmd.restore_to_file(address, backup, "",
                        output_raw_path, IMAGE_FORMAT_RAW)
    output2_raw_snap2 = read_file(output_raw_path, offset0, length1)
    output2_raw_snap1 = read_file(output_raw_path, offset1,
                                  length0 - length1)
    assert output2_raw_snap2 == snap2_data
    assert output2_raw_snap1 == snap1_data[offset1:length0]
    # FIX: the original never removed the raw output in this scenario
    # (unlike scenario 1), leaking the artifact; clean it up the same way.
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(address, backup, "",
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output2_qcow2_snap2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length1)
    output2_qcow2_snap1 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset1, length0 - length1)
    assert output2_qcow2_snap2.decode('utf-8') == snap2_data
    assert output2_qcow2_snap1.decode('utf-8') == \
        snap1_data[offset1:length0]
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    snapshot_revert_with_frontend(address, ENGINE_NAME, snap0)
    rm_snaps(address, [snap1, snap2])
    rm_backups(address, ENGINE_NAME, [backup])
def test_backup_type(
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_controller, backup_targets):  # NOQA
    """Verify the IsIncremental flag across a chain of backups: the first
    backup is full, subsequent ones are incremental, and deleting the
    latest backup forces the next one to be full again.
    """
    for backup_target in backup_targets:
        address = grpc_controller.address
        block_size = 2 * 1024 * 1024

        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

        zero_string = b'\x00'.decode('utf-8')

        # backup0: 256 random data in 1st block
        length0 = 256
        snap0_data = random_string(length0)
        verify_data(dev, 0, snap0_data)
        verify_data(dev, block_size, snap0_data)
        snap0 = cmd.snapshot_create(address)
        backup0 = create_backup(address, snap0, backup_target)
        backup0_url = backup0["URL"]
        # first backup of the volume must be a full backup
        assert backup0['IsIncremental'] is False

        # backup1: 32 random data + 32 zero data + 192 random data in 1st block
        length1 = 32
        offset1 = 32
        snap1_data = zero_string * length1
        verify_data(dev, offset1, snap1_data)
        snap1 = cmd.snapshot_create(address)
        backup1 = create_backup(address, snap1, backup_target)
        backup1_url = backup1["URL"]
        assert backup1['IsIncremental'] is True

        # backup2: 32 random data + 256 random data in 1st block,
        # 256 random data in 2nd block
        length2 = 256
        offset2 = 32
        snap2_data = random_string(length2)
        verify_data(dev, offset2, snap2_data)
        verify_data(dev, block_size, snap2_data)
        snap2 = cmd.snapshot_create(address)
        backup2 = create_backup(address, snap2, backup_target)
        backup2_url = backup2["URL"]
        assert backup2['IsIncremental'] is True

        # deleting the newest backup breaks the incremental chain
        rm_backups(address, ENGINE_NAME, [backup2_url])

        # backup3: 64 zero data + 192 random data in 1st block
        length3 = 64
        offset3 = 0
        verify_data(dev, offset3, zero_string * length3)
        verify_data(dev, length2, zero_string * offset2)
        verify_data(dev, block_size, zero_string * length2)
        snap3 = cmd.snapshot_create(address)
        backup3 = create_backup(address, snap3, backup_target)
        backup3_url = backup3["URL"]
        # chain was broken above, so this one is full again
        assert backup3['IsIncremental'] is False

        # backup4: 256 random data in 1st block
        length4 = 256
        offset4 = 0
        snap4_data = random_string(length4)
        verify_data(dev, offset4, snap4_data)
        snap4 = cmd.snapshot_create(address)
        backup4 = create_backup(address, snap4, backup_target)
        backup4_url = backup4["URL"]
        assert backup4['IsIncremental'] is True

        rm_backups(address, ENGINE_NAME,
                   [backup0_url, backup1_url, backup3_url, backup4_url])

        cmd.sync_agent_server_reset(address)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
        cleanup_controller(grpc_controller)