def backup_test(dev, address,  # NOQA
                volume_name, engine_name, backup_target):
    offset = 0
    length = 128

    snap1_data = random_string(length)
    verify_data(dev, offset, snap1_data)
    snap1_checksum = checksum_dev(dev)
    snap1 = cmd.snapshot_create(address)

    backup1_info = create_backup(address, snap1, backup_target)
    assert backup1_info["VolumeName"] == volume_name
    assert backup1_info["Size"] == BLOCK_SIZE_STR

    snap2_data = random_string(length)
    verify_data(dev, offset, snap2_data)
    snap2_checksum = checksum_dev(dev)
    snap2 = cmd.snapshot_create(address)

    backup2_info = create_backup(address, snap2, backup_target)
    assert backup2_info["VolumeName"] == volume_name
    assert backup2_info["Size"] == BLOCK_SIZE_STR

    snap3_data = random_string(length)
    verify_data(dev, offset, snap3_data)
    snap3_checksum = checksum_dev(dev)
    snap3 = cmd.snapshot_create(address)

    backup3_info = create_backup(address, snap3, backup_target)
    assert backup3_info["VolumeName"] == volume_name
    assert backup3_info["Size"] == BLOCK_SIZE_STR

    restore_with_frontend(address, engine_name, backup3_info["URL"])
    readed = read_dev(dev, offset, length)
    assert readed == snap3_data
    c = checksum_dev(dev)
    assert c == snap3_checksum
    rm_backups(address, engine_name, [backup3_info["URL"]])

    restore_with_frontend(address, engine_name, backup1_info["URL"])
    readed = read_dev(dev, offset, length)
    assert readed == snap1_data
    c = checksum_dev(dev)
    assert c == snap1_checksum
    rm_backups(address, engine_name, [backup1_info["URL"]])

    restore_with_frontend(address, engine_name, backup2_info["URL"])
    readed = read_dev(dev, offset, length)
    assert readed == snap2_data
    c = checksum_dev(dev)
    assert c == snap2_checksum
    rm_backups(address, engine_name, [backup2_info["URL"]])

def test_ha_remove_extra_disks(grpc_controller,  # NOQA
                               grpc_replica1, grpc_replica2):  # NOQA
    address = grpc_controller.address

    prepare_backup_dir(BACKUP_DIR)
    open_replica(grpc_replica1)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 0

    r1_url = grpc_replica1.url
    v = grpc_controller.volume_start(replicas=[r1_url])
    assert v.name == VOLUME_NAME
    assert v.replicaCount == 1

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 1
    assert replicas[0].mode == "RW"

    dev = get_blockdev(VOLUME_NAME)

    wasted_data = random_string(128)
    data_offset = 1024
    verify_data(dev, data_offset, wasted_data)

    # now replica1 contains extra data in a snapshot
    cmd.snapshot_create(address)

    cleanup_controller(grpc_controller)

    open_replica(grpc_replica2)
    replicas = grpc_controller.replica_list()
    assert len(replicas) == 0

    r2_url = grpc_replica2.url
    v = grpc_controller.volume_start(replicas=[r2_url])
    assert v.name == VOLUME_NAME
    assert v.replicaCount == 1

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 1
    assert replicas[0].mode == "RW"

    dev = get_blockdev(VOLUME_NAME)

    data = random_string(128)
    data_offset = 1024
    verify_data(dev, data_offset, data)

    r1 = grpc_replica1.replica_reload()
    print(r1)

    cmd.add_replica(address, r1_url)
    wait_for_rebuild_complete(address)

    verify_data(dev, data_offset, data)

def backup_hole_with_backing_file_test(backup_target,  # NOQA
                                       grpc_backing_controller,  # NOQA
                                       grpc_backing_replica1,  # NOQA
                                       grpc_backing_replica2):  # NOQA
    address = grpc_backing_controller.address

    dev = get_backing_dev(grpc_backing_replica1,
                          grpc_backing_replica2,
                          grpc_backing_controller)

    offset1 = 512
    length1 = 256

    offset2 = 640
    length2 = 256

    boundary_offset = 0
    boundary_length = 4100  # just past 4096, into the next 4K block
    hole_offset = 2 * 1024 * 1024
    hole_length = 1024

    snap1_data = random_string(length1)
    verify_data(dev, offset1, snap1_data)
    snap1_checksum = checksum_dev(dev)
    snap1 = cmd.snapshot_create(address)

    boundary_data_backup1 = read_dev(dev, boundary_offset, boundary_length)
    hole_data_backup1 = read_dev(dev, hole_offset, hole_length)
    backup1_info = create_backup(address, snap1, backup_target)

    snap2_data = random_string(length2)
    verify_data(dev, offset2, snap2_data)
    snap2_checksum = checksum_dev(dev)
    snap2 = cmd.snapshot_create(address)

    boundary_data_backup2 = read_dev(dev, boundary_offset, boundary_length)
    hole_data_backup2 = read_dev(dev, hole_offset, hole_length)
    backup2_info = create_backup(address, snap2, backup_target)

    restore_with_frontend(address, ENGINE_BACKING_NAME,
                          backup1_info["URL"])
    readed = read_dev(dev, boundary_offset, boundary_length)
    assert readed == boundary_data_backup1
    readed = read_dev(dev, hole_offset, hole_length)
    assert readed == hole_data_backup1
    c = checksum_dev(dev)
    assert c == snap1_checksum

    restore_with_frontend(address, ENGINE_BACKING_NAME,
                          backup2_info["URL"])
    readed = read_dev(dev, boundary_offset, boundary_length)
    assert readed == boundary_data_backup2
    readed = read_dev(dev, hole_offset, hole_length)
    assert readed == hole_data_backup2
    c = checksum_dev(dev)
    assert c == snap2_checksum

def test_snapshot_revert_with_backing_file(grpc_backing_controller,  # NOQA
                                           grpc_backing_replica1,  # NOQA
                                           grpc_backing_replica2):  # NOQA
    address = grpc_backing_controller.address

    dev = get_backing_dev(grpc_backing_replica1,
                          grpc_backing_replica2,
                          grpc_backing_controller)

    offset = 0
    length = 256

    snap0 = cmd.snapshot_create(address)
    before = read_dev(dev, offset, length)
    assert before != ""

    info = cmd.snapshot_info(address)
    assert len(info) == 2
    assert VOLUME_HEAD in info
    assert snap0 in info

    exists = read_from_backing_file(offset, length)
    assert before == exists

    snapshot_revert_test(dev, address, ENGINE_BACKING_NAME)

    snapshot_revert_with_frontend(address, ENGINE_BACKING_NAME, snap0)
    after = read_dev(dev, offset, length)
    assert before == after

def __init__(self, dev, data, controller_addr):
    """
    Snapshot helper constructor: write and verify the test data on the
    device, record the device checksum, then take a snapshot of it.
    """
    self.dev = dev
    self.data = data
    self.controller_addr = controller_addr
    self.data.write_and_verify_data(self.dev)
    self.checksum = checksum_dev(self.dev)
    self.name = cmd.snapshot_create(controller_addr)

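# Illustrative only (a hedged sketch): how the constructor above might be
# exercised. The enclosing class name `Snapshot` and a `Data` helper exposing
# `write_and_verify_data(dev)` are assumptions not shown in this fragment;
# only `checksum_dev` and `cmd.snapshot_create` appear elsewhere in this file.
#
#   data = Data(offset=0, length=128)
#   snap = Snapshot(dev, data, address)   # writes data, checksums, snapshots
#   assert snap.checksum == checksum_dev(dev)
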
def backup_with_backing_file_test(backup_target,  # NOQA
                                  grpc_backing_controller,  # NOQA
                                  grpc_backing_replica1,  # NOQA
                                  grpc_backing_replica2):  # NOQA
    address = grpc_backing_controller.address

    dev = get_backing_dev(grpc_backing_replica1,
                          grpc_backing_replica2,
                          grpc_backing_controller)

    offset = 0
    length = 256

    snap0 = cmd.snapshot_create(address)
    before = read_dev(dev, offset, length)
    assert before != ""
    snap0_checksum = checksum_dev(dev)

    exists = read_from_backing_file(offset, length)
    assert before == exists

    backup0_info = create_backup(address, snap0, backup_target)
    assert backup0_info["VolumeName"] == VOLUME_BACKING_NAME

    backup_test(dev, address, VOLUME_BACKING_NAME,
                ENGINE_BACKING_NAME, backup_target)

    restore_with_frontend(address, ENGINE_BACKING_NAME,
                          backup0_info["URL"])
    after = read_dev(dev, offset, length)
    assert before == after
    c = checksum_dev(dev)
    assert c == snap0_checksum

    rm_backups(address, ENGINE_BACKING_NAME, [backup0_info["URL"]])

def test_backup_volume_deletion(grpc_replica1, grpc_replica2,  # NOQA
                                grpc_controller, backup_targets):  # NOQA
    offset = 0
    length = 128
    address = grpc_controller.address

    for backup_target in backup_targets:
        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
        snap_data = random_string(length)
        verify_data(dev, offset, snap_data)
        snap = cmd.snapshot_create(address)

        backup_info = create_backup(address, snap, backup_target)
        assert backup_info["VolumeName"] == VOLUME_NAME
        assert backup_info["Size"] == BLOCK_SIZE_STR
        assert snap in backup_info["SnapshotName"]

        cmd.backup_volume_rm(address, VOLUME_NAME, backup_target)
        info = cmd.backup_volume_list(address, VOLUME_NAME, backup_target)
        assert "cannot find" in info[VOLUME_NAME]["Messages"]["error"]

        cmd.sync_agent_server_reset(address)
        cleanup_controller(grpc_controller)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)

def test_ha_single_replica_rebuild(grpc_controller,  # NOQA
                                   grpc_replica1, grpc_replica2):  # NOQA
    address = grpc_controller.address

    open_replica(grpc_replica1)
    open_replica(grpc_replica2)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 0

    r1_url = grpc_replica1.url
    r2_url = grpc_replica2.url
    v = grpc_controller.volume_start(replicas=[r1_url, r2_url])
    assert v.replicaCount == 2

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 2
    assert replicas[0].mode == "RW"
    assert replicas[1].mode == "RW"

    dev = get_blockdev(VOLUME_NAME)

    data = random_string(128)
    data_offset = 1024
    verify_data(dev, data_offset, data)

    # Cleanup replica2
    cleanup_replica(grpc_replica2)

    verify_async(dev, 10, 128, 1)
    verify_replica_state(grpc_controller, 1, "ERR")
    verify_read(dev, data_offset, data)

    grpc_controller.replica_delete(replicas[1].address)

    # Rebuild replica2
    open_replica(grpc_replica2)
    cmd.add_replica(address, r2_url)
    wait_for_rebuild_complete(address)

    verify_async(dev, 10, 128, 1)
    verify_replica_state(grpc_controller, 1, "RW")
    verify_read(dev, data_offset, data)

    # WORKAROUND: the parent of the volume head cannot be removed directly,
    # so take a new snapshot first and then purge the system snapshot
    newsnap = cmd.snapshot_create(address)

    info = cmd.snapshot_info(address)
    assert len(info) == 3

    sysnap = info[newsnap]["parent"]
    assert info[sysnap]["parent"] == ""
    assert newsnap in info[sysnap]["children"]
    assert info[sysnap]["usercreated"] is False
    assert info[sysnap]["removed"] is False

    cmd.snapshot_purge(address)
    wait_for_purge_completion(address)

    info = cmd.snapshot_info(address)
    assert len(info) == 2
    assert info[newsnap] is not None
    assert info[VOLUME_HEAD] is not None

def restore_inc_test(grpc_engine_manager,  # NOQA
                     grpc_controller,  # NOQA
                     grpc_replica1, grpc_replica2,  # NOQA
                     grpc_dr_controller,  # NOQA
                     grpc_dr_replica1, grpc_dr_replica2,  # NOQA
                     backup_target):  # NOQA
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    zero_string = b'\x00'.decode('utf-8')

    # backup0: 256 random data in 1st block
    length0 = 256
    snap0_data = random_string(length0)
    verify_data(dev, 0, snap0_data)
    verify_data(dev, BLOCK_SIZE, snap0_data)
    snap0 = cmd.snapshot_create(address)
    backup0 = create_backup(address, snap0, backup_target)["URL"]
    backup0_name = cmd.backup_inspect(address, backup0)['Name']

    # backup1: 32 random data + 32 zero data + 192 random data in 1st block
    length1 = 32
    offset1 = 32
    snap1_data = zero_string * length1
    verify_data(dev, offset1, snap1_data)
    snap1 = cmd.snapshot_create(address)
    backup1 = create_backup(address, snap1, backup_target)["URL"]
    backup1_name = cmd.backup_inspect(address, backup1)['Name']

    # backup2: 32 random data + 256 random data in 1st block,
    # 256 random data in 2nd block
    length2 = 256
    offset2 = 32
    snap2_data = random_string(length2)
    verify_data(dev, offset2, snap2_data)
    verify_data(dev, BLOCK_SIZE, snap2_data)
    snap2 = cmd.snapshot_create(address)
    backup2 = create_backup(address, snap2, backup_target)["URL"]
    backup2_name = cmd.backup_inspect(address, backup2)['Name']

    # backup3: 64 zero data + 192 random data in 1st block
    length3 = 64
    offset3 = 0
    verify_data(dev, offset3, zero_string * length3)
    verify_data(dev, length2, zero_string * offset2)
    verify_data(dev, BLOCK_SIZE, zero_string * length2)
    snap3 = cmd.snapshot_create(address)
    backup3 = create_backup(address, snap3, backup_target)["URL"]
    backup3_name = cmd.backup_inspect(address, backup3)['Name']

    # backup4: 256 random data in 1st block
    length4 = 256
    offset4 = 0
    snap4_data = random_string(length4)
    verify_data(dev, offset4, snap4_data)
    snap4 = cmd.snapshot_create(address)
    backup4 = create_backup(address, snap4, backup_target)["URL"]
    backup4_name = cmd.backup_inspect(address, backup4)['Name']

    # start the DR volume (no frontend)
    dr_address = grpc_dr_controller.address
    start_no_frontend_volume(grpc_engine_manager,
                             grpc_dr_controller,
                             grpc_dr_replica1, grpc_dr_replica2)

    cmd.backup_restore(dr_address, backup0)
    wait_for_restore_completion(dr_address, backup0)
    verify_no_frontend_data(grpc_engine_manager,
                            0, snap0_data, grpc_dr_controller)

    # mock a restore crash/error:
    # temporarily rename the backup blocks so the incremental restore fails
    delta_file1 = "volume-delta-" + backup0_name + ".img"
    if "vfs" in backup_target:
        command = ["find", VFS_DIR, "-type", "d", "-name", VOLUME_NAME]
        backup_volume_path = subprocess.check_output(command).strip()
        command = ["find", backup_volume_path, "-name", "*blk"]
        blocks = subprocess.check_output(command).split()
        assert len(blocks) != 0
        for blk in blocks:
            command = ["mv", blk, blk + ".tmp".encode('utf-8')]
            subprocess.check_output(command).strip()

    # the incremental restore should fail
    is_failed = False
    cmd.restore_inc(dr_address, backup1, backup0_name)
    for i in range(RETRY_COUNTS):
        rs = cmd.restore_status(dr_address)
        for status in rs.values():
            if status['backupURL'] != backup1:
                break
            if 'error' in status.keys():
                if status['error'] != "":
                    assert 'no such file or directory' in \
                        status['error']
                    is_failed = True
        if is_failed:
            break
        time.sleep(RETRY_INTERVAL)
    assert is_failed

    assert path.exists(FIXED_REPLICA_PATH1 + delta_file1)
    assert path.exists(FIXED_REPLICA_PATH2 + delta_file1)

    # move the backup blocks back
    for blk in blocks:
        command = ["mv", blk + ".tmp".encode('utf-8'), blk]
        subprocess.check_output(command)

    data1 = \
        snap0_data[0:offset1] + snap1_data + \
        snap0_data[offset1+length1:]

    # race condition: the last restoration has failed,
    # but `isRestoring` hasn't been cleaned up yet
    for i in range(RETRY_COUNTS):
        try:
            restore_incrementally(dr_address, backup1, backup0_name)
            break
        except subprocess.CalledProcessError as e:
            if "already in progress" not in e.output:
                time.sleep(RETRY_INTERVAL)
            else:
                raise e

    verify_no_frontend_data(grpc_engine_manager,
                            0, data1, grpc_dr_controller)

    assert not path.exists(FIXED_REPLICA_PATH1 + delta_file1)
    assert not path.exists(FIXED_REPLICA_PATH2 + delta_file1)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup1_name)

    data2 = \
        data1[0:offset2] + snap2_data + \
        zero_string * (BLOCK_SIZE - length2 - offset2) + snap2_data

    restore_incrementally(dr_address, backup2, backup1_name)
    verify_no_frontend_data(grpc_engine_manager,
                            0, data2, grpc_dr_controller)

    delta_file2 = "volume-delta-" + backup1_name + ".img"
    assert not path.exists(FIXED_REPLICA_PATH1 + delta_file2)
    assert not path.exists(FIXED_REPLICA_PATH2 + delta_file2)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup2_name)

    # mock race condition: the claimed last-restored backup doesn't match
    with pytest.raises(subprocess.CalledProcessError) as e:
        restore_incrementally(dr_address, backup1, backup0_name)
    assert "doesn't match lastRestored" in e.value.output

    data3 = zero_string * length3 + data2[length3:length2]

    restore_incrementally(dr_address, backup3, backup2_name)
    verify_no_frontend_data(grpc_engine_manager,
                            0, data3, grpc_dr_controller)

    delta_file3 = "volume-delta-" + backup3_name + ".img"
    assert not path.exists(FIXED_REPLICA_PATH1 + delta_file3)
    assert not path.exists(FIXED_REPLICA_PATH2 + delta_file3)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup3_name)

    # mock corner case: the last-restored backup is no longer valid
    rm_backups(address, ENGINE_NAME, [backup3])
    # this actually falls back to a full restoration
    restore_incrementally(dr_address, backup4, backup3_name)
    verify_no_frontend_data(grpc_engine_manager,
                            0, snap4_data, grpc_dr_controller)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup4_name)

    if "vfs" in backup_target:
        command = ["find", VFS_DIR, "-type", "d", "-name", VOLUME_NAME]
        backup_volume_path = subprocess.check_output(command).strip()
        command = ["find", backup_volume_path, "-name", "*tempoary"]
        tmp_files = subprocess.check_output(command).split()
        assert len(tmp_files) == 0

    cleanup_no_frontend_volume(grpc_engine_manager,
                               grpc_dr_controller,
                               grpc_dr_replica1, grpc_dr_replica2)

    rm_backups(address, ENGINE_NAME, [backup0, backup1, backup2, backup4])

    cmd.sync_agent_server_reset(address)
    cleanup_controller(grpc_controller)
    cleanup_replica(grpc_replica1)
    cleanup_replica(grpc_replica2)

def restore_to_file_with_backing_file_test(backup_target,  # NOQA
                                           grpc_backing_controller,  # NOQA
                                           grpc_backing_replica1,  # NOQA
                                           grpc_backing_replica2):  # NOQA
    address = grpc_backing_controller.address

    backing_dev = get_backing_dev(grpc_backing_replica1,
                                  grpc_backing_replica2,
                                  grpc_backing_controller)

    length0 = 4 * 1024
    length1 = 256
    length2 = 128
    offset0 = 0
    offset1 = length1 + offset0
    offset2 = length2 + offset0

    output_raw_path = file(OUTPUT_FILE_RAW)
    output_qcow2_path = file(OUTPUT_FILE_QCOW2)

    # create 1 empty snapshot.
    # data in output image == data in backing
    check_backing()
    check_empty_volume(backing_dev)
    snap0 = cmd.snapshot_create(address)
    backup = create_backup(address, snap0, backup_target)["URL"]

    volume_data = read_dev(backing_dev, offset0, length0)
    backing_data = read_from_backing_file(offset0, length0)
    dev_checksum = checksum_dev(backing_dev)
    assert volume_data != ""
    assert volume_data == backing_data

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_raw_path, IMAGE_FORMAT_RAW)
    output0_raw = read_file(output_raw_path, offset0, length0)
    output0_checksum = checksum_data(
        read_file(output_raw_path, 0, SIZE).encode('utf-8'))
    assert output0_raw == backing_data
    assert output0_checksum == dev_checksum
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output0_qcow2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length0)
    output0_checksum = checksum_data(
        read_qcow2_file_without_backing_file(output_qcow2_path, 0, SIZE))
    assert output0_qcow2.decode('utf-8') == backing_data
    assert output0_qcow2.decode('utf-8') == volume_data
    assert output0_checksum == dev_checksum
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    rm_backups(address, ENGINE_BACKING_NAME, [backup])

    # create 1 snapshot with 256B data.
    # output = snap1(offset0, length1) + backing(offset1, ...)
    snap1_data = random_string(length1)
    verify_data(backing_dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create(address)
    backup = create_backup(address, snap1, backup_target)["URL"]

    volume_data = read_dev(backing_dev, offset0, length0)
    backing_data = read_from_backing_file(offset1, length0 - offset1)
    dev_checksum = checksum_dev(backing_dev)

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_raw_path, IMAGE_FORMAT_RAW)
    output1_raw_snap1 = read_file(output_raw_path, offset0, length1)
    output1_raw_backing = read_file(output_raw_path, offset1,
                                    length0 - offset1)
    output1_checksum = checksum_data(
        read_file(output_raw_path, 0, SIZE).encode('utf-8'))
    assert output1_raw_snap1 == snap1_data
    assert output1_raw_backing == backing_data
    assert output1_raw_snap1 + output1_raw_backing == volume_data
    assert output1_checksum == dev_checksum
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output1_qcow2_snap1 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length1)
    output1_qcow2_backing = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset1, length0 - offset1)
    output1_checksum = checksum_data(
        read_qcow2_file_without_backing_file(output_qcow2_path, 0, SIZE))
    assert output1_qcow2_snap1.decode('utf-8') == snap1_data
    assert output1_qcow2_backing.decode('utf-8') == backing_data
    assert output1_qcow2_snap1.decode('utf-8') + \
        output1_qcow2_backing.decode('utf-8') == volume_data
    assert output1_checksum == dev_checksum
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    snapshot_revert_with_frontend(address, ENGINE_BACKING_NAME, snap0)
    rm_snaps(address, [snap1])
    rm_backups(address, ENGINE_BACKING_NAME, [backup])
    check_backing()
    check_empty_volume(backing_dev)

    # create 2 snapshots with 256B data and 128B data
    # output = snap2(offset0, length2) +
    #          snap1(offset2, length1 - length2) + backing(offset1, ...)
    snap1_data = random_string(length1)
    verify_data(backing_dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create(address)

    snap2_data = random_string(length2)
    verify_data(backing_dev, offset0, snap2_data)
    snap2 = cmd.snapshot_create(address)

    backup = create_backup(address, snap2, backup_target)["URL"]

    volume_data = read_dev(backing_dev, offset0, length0)
    backing_data = read_from_backing_file(offset1, length0 - offset1)
    dev_checksum = checksum_dev(backing_dev)

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_raw_path, IMAGE_FORMAT_RAW)
    output2_raw_snap2 = read_file(output_raw_path, offset0, length2)
    output2_raw_snap1 = read_file(output_raw_path, offset2,
                                  length1 - length2)
    output2_raw_backing = read_file(output_raw_path, offset1,
                                    length0 - offset1)
    output2_checksum = checksum_data(
        read_file(output_raw_path, 0, SIZE).encode('utf-8'))
    assert output2_raw_snap2 == snap2_data
    assert output2_raw_snap1 == snap1_data[offset2:length1]
    assert output2_raw_backing == backing_data
    assert \
        volume_data == \
        output2_raw_snap2 + output2_raw_snap1 + output2_raw_backing
    assert output2_checksum == dev_checksum
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(address, backup, file(BACKING_FILE_QCOW),
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output2_qcow2_snap2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length2)
    output2_qcow2_snap1 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset2, length1 - length2)
    output2_qcow2_backing = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset1, length0 - offset1)
    output2_checksum = checksum_data(
        read_qcow2_file_without_backing_file(output_qcow2_path, 0, SIZE))
    assert output2_qcow2_snap2.decode('utf-8') == snap2_data
    assert output2_qcow2_snap1.decode('utf-8') == snap1_data[offset2:length1]
    assert output2_qcow2_backing.decode('utf-8') == backing_data
    assert \
        volume_data == \
        output2_qcow2_snap2.decode('utf-8') + \
        output2_qcow2_snap1.decode('utf-8') + \
        output2_qcow2_backing.decode('utf-8')
    assert output2_checksum == dev_checksum
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    snapshot_revert_with_frontend(address, ENGINE_BACKING_NAME, snap0)
    rm_snaps(address, [snap1, snap2])
    rm_backups(address, ENGINE_BACKING_NAME, [backup])
    check_backing()
    check_empty_volume(backing_dev)

def restore_to_file_without_backing_file_test(backup_target,  # NOQA
                                               grpc_controller,  # NOQA
                                               grpc_replica1,  # NOQA
                                               grpc_replica2):  # NOQA
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    length0 = 256
    length1 = 128
    offset0 = 0
    offset1 = length1 + offset0

    output_raw_path = file(OUTPUT_FILE_RAW)
    output_qcow2_path = file(OUTPUT_FILE_QCOW2)

    # create 1 empty snapshot for converting to init state.
    snap0 = cmd.snapshot_create(address)

    # create 1 snapshot with 256B data.
    # output = snap1(offset0, length0)
    snap1_data = random_string(length0)
    verify_data(dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create(address)
    backup = create_backup(address, snap1, backup_target)["URL"]

    cmd.restore_to_file(address, backup, "",
                        output_raw_path, IMAGE_FORMAT_RAW)
    output1_raw = read_file(output_raw_path, offset0, length0)
    assert output1_raw == snap1_data
    os.remove(output_raw_path)
    assert not os.path.exists(output_raw_path)

    cmd.restore_to_file(address, backup, "",
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output1_qcow2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length0)
    assert output1_qcow2.decode('utf-8') == snap1_data
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    snapshot_revert_with_frontend(address, ENGINE_NAME, snap0)
    rm_snaps(address, [snap1])
    rm_backups(address, ENGINE_NAME, [backup])

    # create 2 snapshots with 256B data and 128B data
    # output = snap2(offset0, length1) + snap1(offset1, length0 - length1)
    snap1_data = random_string(length0)
    verify_data(dev, offset0, snap1_data)
    snap1 = cmd.snapshot_create(address)

    snap2_data = random_string(length1)
    verify_data(dev, offset0, snap2_data)
    snap2 = cmd.snapshot_create(address)

    backup = create_backup(address, snap2, backup_target)["URL"]

    cmd.restore_to_file(address, backup, "",
                        output_raw_path, IMAGE_FORMAT_RAW)
    output2_raw_snap2 = read_file(output_raw_path, offset0, length1)
    output2_raw_snap1 = read_file(output_raw_path, offset1,
                                  length0 - length1)
    assert output2_raw_snap2 == snap2_data
    assert output2_raw_snap1 == snap1_data[offset1:length0]

    cmd.restore_to_file(address, backup, "",
                        output_qcow2_path, IMAGE_FORMAT_QCOW2)
    output2_qcow2_snap2 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset0, length1)
    output2_qcow2_snap1 = read_qcow2_file_without_backing_file(
        output_qcow2_path, offset1, length0 - length1)
    assert output2_qcow2_snap2.decode('utf-8') == snap2_data
    assert output2_qcow2_snap1.decode('utf-8') == snap1_data[offset1:length0]
    os.remove(output_qcow2_path)
    assert not os.path.exists(output_qcow2_path)

    snapshot_revert_with_frontend(address, ENGINE_NAME, snap0)
    rm_snaps(address, [snap1, snap2])
    rm_backups(address, ENGINE_NAME, [backup])

def test_backup_type(grpc_replica1, grpc_replica2,  # NOQA
                     grpc_controller, backup_targets):  # NOQA
    for backup_target in backup_targets:
        address = grpc_controller.address
        block_size = 2 * 1024 * 1024

        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

        zero_string = b'\x00'.decode('utf-8')

        # backup0: 256 random data in 1st block
        length0 = 256
        snap0_data = random_string(length0)
        verify_data(dev, 0, snap0_data)
        verify_data(dev, block_size, snap0_data)
        snap0 = cmd.snapshot_create(address)
        backup0 = create_backup(address, snap0, backup_target)
        backup0_url = backup0["URL"]
        assert backup0['IsIncremental'] is False

        # backup1: 32 random data + 32 zero data + 192 random data
        # in 1st block
        length1 = 32
        offset1 = 32
        snap1_data = zero_string * length1
        verify_data(dev, offset1, snap1_data)
        snap1 = cmd.snapshot_create(address)
        backup1 = create_backup(address, snap1, backup_target)
        backup1_url = backup1["URL"]
        assert backup1['IsIncremental'] is True

        # backup2: 32 random data + 256 random data in 1st block,
        # 256 random data in 2nd block
        length2 = 256
        offset2 = 32
        snap2_data = random_string(length2)
        verify_data(dev, offset2, snap2_data)
        verify_data(dev, block_size, snap2_data)
        snap2 = cmd.snapshot_create(address)
        backup2 = create_backup(address, snap2, backup_target)
        backup2_url = backup2["URL"]
        assert backup2['IsIncremental'] is True

        rm_backups(address, ENGINE_NAME, [backup2_url])

        # backup3: 64 zero data + 192 random data in 1st block
        length3 = 64
        offset3 = 0
        verify_data(dev, offset3, zero_string * length3)
        verify_data(dev, length2, zero_string * offset2)
        verify_data(dev, block_size, zero_string * length2)
        snap3 = cmd.snapshot_create(address)
        backup3 = create_backup(address, snap3, backup_target)
        backup3_url = backup3["URL"]
        assert backup3['IsIncremental'] is False

        # backup4: 256 random data in 1st block
        length4 = 256
        offset4 = 0
        snap4_data = random_string(length4)
        verify_data(dev, offset4, snap4_data)
        snap4 = cmd.snapshot_create(address)
        backup4 = create_backup(address, snap4, backup_target)
        backup4_url = backup4["URL"]
        assert backup4['IsIncremental'] is True

        rm_backups(address, ENGINE_NAME,
                   [backup0_url, backup1_url, backup3_url, backup4_url])

        cmd.sync_agent_server_reset(address)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
        cleanup_controller(grpc_controller)

def snapshot_tree_create_node(dev, address, offset, length,
                              snap, data, name):
    data[name] = random_string(length)
    verify_data(dev, offset, data[name])
    snap[name] = cmd.snapshot_create(address)
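
# Illustrative sketch (not part of the original suite): one way
# snapshot_tree_create_node could be driven to build a small chain of named
# snapshots. The function name `snapshot_tree_build_example` and the node
# names are assumptions; `dev` and `address` are expected to come from the
# usual fixtures, and `snap`/`data` are plain dicts keyed by node name.
def snapshot_tree_build_example(dev, address):  # NOQA
    snap = {}
    data = {}
    offset = 0
    length = 128
    # each call writes fresh random data and records the snapshot it creates
    for name in ["0a", "0b", "0c"]:
        snapshot_tree_create_node(dev, address, offset, length,
                                  snap, data, name)
    assert len(snap) == len(data) == 3
    return snap, data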