def backup_with_backing_file_test(backup_target,  # NOQA
                                  grpc_backing_controller,  # NOQA
                                  grpc_backing_replica1,  # NOQA
                                  grpc_backing_replica2):  # NOQA
    """Back up and restore a volume that sits on top of a backing file.

    Verifies that data visible through the block device before the backup
    matches the backing file content, that a backup/restore round trip
    preserves both the data and the device checksum, and that the backup
    can be removed afterwards.
    """
    address = grpc_backing_controller.address

    dev = get_backing_dev(grpc_backing_replica1, grpc_backing_replica2,
                          grpc_backing_controller)

    offset = 0
    length = 256

    snap0 = cmd.snapshot_create(address)
    before = read_dev(dev, offset, length)
    # The device is backed by a backing file, so even a fresh volume must
    # show non-empty data here.
    assert before != ""
    snap0_checksum = checksum_dev(dev)

    # The same range read directly from the backing file must match what
    # the block device exposes.
    exists = read_from_backing_file(offset, length)
    assert before == exists

    backup0_info = create_backup(address, snap0, backup_target)
    assert backup0_info["VolumeName"] == VOLUME_BACKING_NAME

    # Run the generic backup scenario against the backing-file volume.
    backup_test(dev, address, VOLUME_BACKING_NAME, ENGINE_BACKING_NAME,
                backup_target)

    # Restoring the initial backup must bring back the original content.
    restore_with_frontend(address, ENGINE_BACKING_NAME, backup0_info["URL"])
    after = read_dev(dev, offset, length)
    assert before == after

    c = checksum_dev(dev)
    assert c == snap0_checksum

    rm_backups(address, ENGINE_BACKING_NAME, [backup0_info["URL"]])
def backup_hole_with_backing_file_test(backup_target,  # NOQA
                                       grpc_backing_controller,  # NOQA
                                       grpc_backing_replica1,  # NOQA
                                       grpc_backing_replica2):  # NOQA
    """Back up/restore sparse ("hole") and block-boundary regions of a
    backing-file volume and verify both restores reproduce the exact data.

    NOTE(review): a second definition of this same function name appears
    later in this file; in Python the later binding wins, so this version
    is shadowed — confirm which one is intended to be used.
    """
    address = grpc_backing_controller.address

    dev = get_backing_dev(grpc_backing_replica1, grpc_backing_replica2,
                          grpc_backing_controller)

    offset1 = 512
    length1 = 256

    offset2 = 640
    length2 = 256

    boundary_offset = 0
    boundary_length = 4100  # just pass 4096 into next 4k
    hole_offset = 2 * 1024 * 1024
    hole_length = 1024

    # First write + snapshot; capture the boundary and hole regions as
    # they existed at backup1 time.
    snap1_data = random_string(length1)
    verify_data(dev, offset1, snap1_data)
    snap1_checksum = checksum_dev(dev)
    snap1 = cmd.snapshot_create(address)

    boundary_data_backup1 = read_dev(dev, boundary_offset, boundary_length)
    hole_data_backup1 = read_dev(dev, hole_offset, hole_length)
    backup1_info = create_backup(address, snap1, backup_target)

    # Second write + snapshot; capture the same regions at backup2 time.
    snap2_data = random_string(length2)
    verify_data(dev, offset2, snap2_data)
    snap2_checksum = checksum_dev(dev)
    snap2 = cmd.snapshot_create(address)

    boundary_data_backup2 = read_dev(dev, boundary_offset, boundary_length)
    hole_data_backup2 = read_dev(dev, hole_offset, hole_length)
    backup2_info = create_backup(address, snap2, backup_target)

    # Restore backup1 and verify both regions and the full checksum.
    restore_with_frontend(address, ENGINE_BACKING_NAME, backup1_info["URL"])
    readed = read_dev(dev, boundary_offset, boundary_length)
    assert readed == boundary_data_backup1
    readed = read_dev(dev, hole_offset, hole_length)
    assert readed == hole_data_backup1
    c = checksum_dev(dev)
    assert c == snap1_checksum

    # Restore backup2 and verify again.
    restore_with_frontend(address, ENGINE_BACKING_NAME, backup2_info["URL"])
    readed = read_dev(dev, boundary_offset, boundary_length)
    assert readed == boundary_data_backup2
    readed = read_dev(dev, hole_offset, hole_length)
    assert readed == hole_data_backup2
    c = checksum_dev(dev)
    assert c == snap2_checksum
def snapshot_tree_verify_backup_node(
        grpc_controller, grpc_replica1, grpc_replica2,
        address, engine_name, offset, length, backup, data, name):  # NOQA
    """Reset the volume, restore the backup stored under *name*, and
    verify the restored device range matches the recorded data.

    NOTE(review): a later definition of this same function name (with a
    different signature) appears below in this file; the later binding
    shadows this one — confirm which version callers should get.
    """
    reset_volume(grpc_controller, grpc_replica1, grpc_replica2)
    dev = get_blockdev(grpc_controller.volume_get().name)
    restore_with_frontend(address, engine_name, backup[name])
    readed = read_dev(dev, offset, length)
    assert readed == data[name]
def backup_test(
        dev, address,  # NOQA
        volume_name, engine_name, backup_target):
    """Generic backup scenario: create three snapshots/backups, then
    restore and delete them out of creation order (3, 1, 2), verifying
    data and checksum after each restore.

    Deleting out of order exercises that removing one backup does not
    corrupt the blocks shared with the remaining backups.
    """
    offset = 0
    length = 128

    # Round 1: write, snapshot, back up.
    snap1_data = random_string(length)
    verify_data(dev, offset, snap1_data)
    snap1_checksum = checksum_dev(dev)
    snap1 = cmd.snapshot_create(address)

    backup1_info = create_backup(address, snap1, backup_target)
    assert backup1_info["VolumeName"] == volume_name
    assert backup1_info["Size"] == BLOCK_SIZE_STR

    # Round 2.
    snap2_data = random_string(length)
    verify_data(dev, offset, snap2_data)
    snap2_checksum = checksum_dev(dev)
    snap2 = cmd.snapshot_create(address)

    backup2_info = create_backup(address, snap2, backup_target)
    assert backup2_info["VolumeName"] == volume_name
    assert backup2_info["Size"] == BLOCK_SIZE_STR

    # Round 3.
    snap3_data = random_string(length)
    verify_data(dev, offset, snap3_data)
    snap3_checksum = checksum_dev(dev)
    snap3 = cmd.snapshot_create(address)

    backup3_info = create_backup(address, snap3, backup_target)
    assert backup3_info["VolumeName"] == volume_name
    assert backup3_info["Size"] == BLOCK_SIZE_STR

    # Restore + delete backup 3.
    restore_with_frontend(address, engine_name, backup3_info["URL"])
    readed = read_dev(dev, offset, length)
    assert readed == snap3_data
    c = checksum_dev(dev)
    assert c == snap3_checksum

    rm_backups(address, engine_name, [backup3_info["URL"]])

    # Restore + delete backup 1 (still intact after deleting backup 3).
    restore_with_frontend(address, engine_name, backup1_info["URL"])
    readed = read_dev(dev, offset, length)
    assert readed == snap1_data
    c = checksum_dev(dev)
    assert c == snap1_checksum

    rm_backups(address, engine_name, [backup1_info["URL"]])

    # Restore + delete backup 2 (the last remaining backup).
    restore_with_frontend(address, engine_name, backup2_info["URL"])
    readed = read_dev(dev, offset, length)
    assert readed == snap2_data
    c = checksum_dev(dev)
    assert c == snap2_checksum

    rm_backups(address, engine_name, [backup2_info["URL"]])
def test_backup_lock(
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_controller, backup_targets):  # NOQA
    """
    Test backup locks

    Context:

    The idea is to implement a locking mechanism that utilizes the
    backupstore, to prevent the following dangerous cases of concurrent
    operations.
    - prevent backup deletion during backup restoration
    - prevent backup deletion while a backup is in progress
    - prevent backup creation during backup deletion
    - prevent backup restoration during backup deletion

    Steps:

    1.  Create a volume(1) and attach to the current node
    2.  create a backup(1) of volume(1)
    3.  verify backup(1) creation completed
    4.  write some data to volume(1)
    5.  create an active lock of type Delete
    6.  create a backup(2) of volume(1)
    7.  verify backup(2) creation timed out
    8.  delete active lock of type Delete
    9.  create an active lock of type Delete
    10. restore backup(1)
    11. verify backup(1) restore timed out
    12. delete active lock of type Delete
    13. restore backup(1)
    14. verify backup(1) restore completed
    15. create an active lock of type Restore
    16. delete backup(1)
    17. verify backup(1) deletion timed out
    18. delete active lock of type Restore
    19. delete backup(1)
    20. verify backup(1) deletion completed
    21. cleanup
    """
    for backup_target in backup_targets:
        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

        # create a regular backup
        address = grpc_controller.address
        offset = 0
        length = 128

        snap1_data = random_string(length)
        verify_data(dev, offset, snap1_data)
        snap1_checksum = checksum_dev(dev)
        snap1 = cmd.snapshot_create(address)

        # create a backup to create the volume
        info = create_backup(address, snap1, backup_target)
        assert info["VolumeName"] == VOLUME_NAME
        assert info["Size"] == BLOCK_SIZE_STR
        assert snap1 in info["SnapshotName"]

        # backup should error out with timeout
        # because of delete lock
        create_delete_lock(True)
        with pytest.raises(subprocess.CalledProcessError):
            create_backup(address, snap1, backup_target)
        remove_lock_file(DELETE_LOCK)

        # restore should error out with timeout
        # because of delete lock
        create_delete_lock(True)
        with pytest.raises(subprocess.CalledProcessError):
            restore_with_frontend(address, ENGINE_NAME, info["URL"])
        remove_lock_file(DELETE_LOCK)

        # restore should succeed now, that there is no active delete lock
        restore_with_frontend(address, ENGINE_NAME, info["URL"])
        readed = read_dev(dev, offset, length)
        assert readed == snap1_data
        c = checksum_dev(dev)
        assert c == snap1_checksum

        # delete should error out with timeout
        # because of restore lock
        create_restore_lock(True)
        with pytest.raises(subprocess.CalledProcessError):
            rm_backups(address, ENGINE_NAME, [info["URL"]])
        remove_lock_file(RESTORE_LOCK)

        # delete should succeed now, that there is no active restore lock
        rm_backups(address, ENGINE_NAME, [info["URL"]])

        # cleanup volume 1
        cmd.sync_agent_server_reset(address)
        cleanup_controller(grpc_controller)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
def test_backup_incremental_logic(grpc_replica1, grpc_replica2,  # NOQA
                                  grpc_controller, backup_targets):  # NOQA
    """Verify incremental-backup detection across volume recreation.

    Covers: first backup is full, second is incremental; after the volume
    is deleted and recreated (keeping the backup directory), an empty
    backup is full again and a subsequent data backup is incremental.
    Finally restores both the initial and the last backup and verifies
    contents, including that unwritten regions come back as zeros.
    """
    for backup_target in backup_targets:
        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
        address = grpc_controller.address
        volume_name = VOLUME_NAME
        engine_name = ENGINE_NAME
        offset = 0
        length = 128

        # initial backup
        snap1_data = random_string(length)
        verify_data(dev, offset, snap1_data)
        snap1_checksum = checksum_dev(dev)
        snap1 = cmd.snapshot_create(address)
        backup1_info = create_backup(address, snap1, backup_target)
        assert backup1_info["IsIncremental"] is False

        # delta backup on top of initial backup
        snap2_data = random_string(int(length / 2))
        verify_data(dev, offset, snap2_data)
        snap2 = cmd.snapshot_create(address)
        backup2_info = create_backup(address, snap2, backup_target)
        assert backup2_info["IsIncremental"] is True

        # delete the volume
        cmd.sync_agent_server_reset(address)
        grpc_controller = cleanup_controller(grpc_controller)
        grpc_replica1 = cleanup_replica(grpc_replica1)
        grpc_replica2 = cleanup_replica(grpc_replica2)

        # recreate the volume; keep the backup dir so the previous backups
        # are still visible to the new volume
        dev = get_dev(grpc_replica1, grpc_replica2,
                      grpc_controller, clean_backup_dir=False)

        # empty initial backup after volume recreation
        snap3 = cmd.snapshot_create(address)
        backup3_info = create_backup(address, snap3, backup_target)
        assert backup3_info["VolumeName"] == volume_name
        assert backup3_info["Size"] == '0'
        assert backup3_info["IsIncremental"] is False

        # write half of snap1 onto head
        snap4_data = snap1_data[:int(length / 2)]
        assert len(snap4_data) == int(length / 2)
        verify_data(dev, offset, snap4_data)
        snap4_checksum = checksum_dev(dev)
        assert snap4_checksum != snap1_checksum
        snap4 = cmd.snapshot_create(address)
        backup4_info = create_backup(address, snap4, backup_target)
        assert backup4_info["IsIncremental"] is True

        # restore initial backup
        reset_volume(grpc_controller, grpc_replica1, grpc_replica2)
        dev = get_blockdev(volume_name)
        restore_with_frontend(address, engine_name, backup1_info["URL"])
        assert read_dev(dev, offset, length) == snap1_data
        assert checksum_dev(dev) == snap1_checksum

        # restore final backup (half of snap1)
        reset_volume(grpc_controller, grpc_replica1, grpc_replica2)
        dev = get_blockdev(volume_name)
        restore_with_frontend(address, engine_name, backup4_info["URL"])
        assert checksum_dev(dev) == snap4_checksum
        assert snap4_checksum != snap1_checksum
        data = read_dev(dev, offset, length)
        assert data[:int(length / 2)] == snap4_data
        # the second half was never written after recreation, so it must
        # restore as zero bytes
        assert data[int(length / 2):] == '\x00' * int(length / 2)

        rm_backups(address, engine_name, [
            backup1_info["URL"], backup2_info["URL"],
            backup3_info["URL"], backup4_info["URL"]
        ])

        cmd.sync_agent_server_reset(address)
        cleanup_controller(grpc_controller)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
def test_backup_S3_latest_unavailable(
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_controller, backup_targets):  # NOQA
    """S3-only: verify restores still work when an intermediate backup's
    config file has been removed (made unavailable) from the backupstore.

    Creates three backups, deletes backup2's cfg file, then restores
    backup1 and backup3 and verifies their data and checksums.
    """
    for backup_target in backup_targets:
        # this scenario only applies to S3 targets; skip others
        if "s3://" not in backup_target:
            continue
        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
        address = grpc_controller.address
        volume_name = VOLUME_NAME
        engine_name = ENGINE_NAME
        offset = 0
        length = 128

        # initial backup
        snap1_data = random_string(length)
        verify_data(dev, offset, snap1_data)
        snap1_checksum = checksum_dev(dev)
        snap1 = cmd.snapshot_create(address)
        backup1_info = create_backup(address, snap1, backup_target)

        # backup to be unavailable
        snap2_data = random_string(length)
        verify_data(dev, offset, snap2_data)
        snap2 = cmd.snapshot_create(address)
        backup2_info = create_backup(address, snap2, backup_target)

        # the gc after the restore will clean up the missing backup
        cfg = findfile(BACKUP_DIR, "backup_" + backup2_info["Name"] + ".cfg")
        os.remove(cfg)

        # final full backup after unavailable backup
        snap3_data = random_string(length)
        verify_data(dev, offset, snap3_data)
        snap3_checksum = checksum_dev(dev)
        snap3 = cmd.snapshot_create(address)
        backup3_info = create_backup(address, snap3, backup_target)
        assert backup3_info["VolumeName"] == volume_name
        assert backup3_info["Size"] == BLOCK_SIZE_STR

        # write some stuff on head
        head_data = random_string(length)
        verify_data(dev, offset, head_data)

        # test restore of the initial backup
        reset_volume(grpc_controller, grpc_replica1, grpc_replica2)
        dev = get_blockdev(volume_name)
        restore_with_frontend(address, engine_name, backup1_info["URL"])
        readed = read_dev(dev, offset, length)
        assert readed == snap1_data
        c = checksum_dev(dev)
        assert c == snap1_checksum

        # test a restore for the final backup
        reset_volume(grpc_controller, grpc_replica1, grpc_replica2)
        dev = get_blockdev(volume_name)
        restore_with_frontend(address, engine_name, backup3_info["URL"])
        readed = read_dev(dev, offset, length)
        assert readed == snap3_data
        c = checksum_dev(dev)
        assert c == snap3_checksum

        rm_backups(address, engine_name,
                   [backup1_info["URL"], backup3_info["URL"]])
        cmd.sync_agent_server_reset(address)
        cleanup_controller(grpc_controller)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
def snapshot_tree_verify_backup_node(dev, address, engine_name, offset,
                                     length, backup, data, name):
    """Restore the backup recorded under *name* and check that the device
    range [offset, offset+length) matches the data captured for it."""
    restore_with_frontend(address, engine_name, backup[name])
    assert read_dev(dev, offset, length) == data[name]
def backup_core(
        bin, engine_manager_client,  # NOQA
        grpc_controller_client,  # NOQA
        grpc_replica_client,  # NOQA
        grpc_replica_client2,  # NOQA
        backup_target):
    """Core backup scenario against *backup_target*.

    Covers: full + incremental backup creation and inspection, backup
    listing (with details), error reporting for a corrupt backup cfg and
    a missing volume cfg, failing inspects for unknown backups and
    unsupported schemes, restores of both backups, and final cleanup.

    NOTE(review): the ``bin`` parameter shadows the builtin of the same
    name; it is kept because the name is injected as a fixture.
    """
    open_replica(grpc_replica_client)
    open_replica(grpc_replica_client2)

    r1_url = grpc_replica_client.url
    r2_url = grpc_replica_client2.url
    v = grpc_controller_client.volume_start(replicas=[
        r1_url,
        r2_url,
    ])
    assert v.replicaCount == 2

    backup_type = urlparse(backup_target).scheme

    # create & process backup1
    snapshot1 = cmd.snapshot_create(grpc_controller_client.address)
    output = grpc_replica_client.replica_get().chain[1]
    assert output == 'volume-snap-{}.img'.format(snapshot1)

    backup1 = cmd.backup_create(grpc_controller_client.address,
                                snapshot1, backup_target,
                                {'name': 'backup1', 'type': backup_type})
    backup1_info = cmd.backup_inspect(grpc_controller_client.address, backup1)
    assert backup1_info["URL"] == backup1
    assert backup1_info["IsIncremental"] is False
    assert backup1_info["VolumeName"] == VOLUME_NAME
    assert backup1_info["VolumeSize"] == SIZE_STR
    assert backup1_info["SnapshotName"] == snapshot1
    assert len(backup1_info["Labels"]) == 2
    assert backup1_info["Labels"]["name"] == "backup1"
    assert backup1_info["Labels"]["type"] == backup_type

    # create & process backup2 (no labels -> incremental on top of backup1)
    snapshot2 = cmd.snapshot_create(grpc_controller_client.address)
    output = grpc_replica_client.replica_get().chain[1]
    assert output == 'volume-snap-{}.img'.format(snapshot2)

    backup2 = cmd.backup_create(grpc_controller_client.address,
                                snapshot2, backup_target)
    backup2_info = cmd.backup_inspect(grpc_controller_client.address, backup2)
    assert backup2_info["URL"] == backup2
    assert backup2_info["IsIncremental"] is True
    assert backup2_info["VolumeName"] == VOLUME_NAME
    assert backup2_info["VolumeSize"] == SIZE_STR
    assert backup2_info["SnapshotName"] == snapshot2
    if backup2_info["Labels"] is not None:
        assert len(backup2_info["Labels"]) == 0

    # list all known backups for volume
    volume_info = cmd.backup_volume_list(
        grpc_controller_client.address, VOLUME_NAME,
        backup_target, include_backup_details=True)[VOLUME_NAME]
    assert volume_info["Name"] == VOLUME_NAME
    assert volume_info["Size"] == SIZE_STR
    backup_list = volume_info["Backups"]
    assert backup_list[backup1]["URL"] == backup1_info["URL"]
    assert backup_list[backup1]["SnapshotName"] == \
        backup1_info["SnapshotName"]
    assert backup_list[backup1]["Size"] == backup1_info["Size"]
    assert backup_list[backup1]["Created"] == backup1_info["Created"]
    assert backup_list[backup1]["Messages"] is None
    assert backup_list[backup2]["URL"] == backup2_info["URL"]
    assert backup_list[backup2]["SnapshotName"] == \
        backup2_info["SnapshotName"]
    assert backup_list[backup2]["Size"] == backup2_info["Size"]
    assert backup_list[backup2]["Created"] == backup2_info["Created"]
    assert backup_list[backup2]["Messages"] is None

    # test that corrupt backups are signaled during a list operation
    # https://github.com/longhorn/longhorn/issues/1212
    volume_dir = finddir(BACKUP_DIR, VOLUME_NAME)
    assert volume_dir
    assert os.path.exists(volume_dir)
    backup_dir = os.path.join(volume_dir, "backups")
    assert os.path.exists(backup_dir)
    backup_cfg_name = "backup_" + backup2_info["Name"] + ".cfg"
    assert backup_cfg_name
    backup_cfg_path = findfile(backup_dir, backup_cfg_name)
    assert os.path.exists(backup_cfg_path)
    backup_tmp_cfg_path = os.path.join(volume_dir, backup_cfg_name)
    # stash the valid cfg away and replace it with invalid json
    os.rename(backup_cfg_path, backup_tmp_cfg_path)
    assert os.path.exists(backup_tmp_cfg_path)
    # use a context manager so the file handle is always released
    # (fix: previously opened with bare open()/close())
    with open(backup_cfg_path, "w") as corrupt_backup:
        assert corrupt_backup.write("{corrupt: definitely") > 0

    # request the new backup list; the corrupt cfg must surface as an
    # error message on backup2 instead of failing the whole listing
    volume_info = cmd.backup_volume_list(
        grpc_controller_client.address, VOLUME_NAME,
        backup_target, include_backup_details=True)[VOLUME_NAME]
    assert volume_info["Name"] == VOLUME_NAME
    backup_list = volume_info["Backups"]
    assert backup_list[backup1]["URL"] == backup1_info["URL"]
    assert backup_list[backup1]["Messages"] is None
    assert backup_list[backup2]["URL"] == backup2_info["URL"]
    assert MESSAGE_TYPE_ERROR in backup_list[backup2]["Messages"]

    # we still want to fail inspects, since they operate on urls
    # with no guarantee of backup existence
    with pytest.raises(subprocess.CalledProcessError):
        cmd.backup_inspect(grpc_controller_client.address, backup2)

    # switch back to valid cfg
    os.rename(backup_tmp_cfg_path, backup_cfg_path)
    assert cmd.backup_inspect(grpc_controller_client.address, backup2)

    # test that list returns a volume_info with an error message
    # for a missing volume.cfg instead of failing with an error
    # https://github.com/rancher/longhorn/issues/399
    volume_cfg_path = findfile(volume_dir, VOLUME_CONFIG_FILE)
    assert os.path.exists(volume_cfg_path)
    volume_tmp_cfg_path = volume_cfg_path.replace(
        VOLUME_CONFIG_FILE, VOLUME_TMP_CONFIG_FILE)
    os.rename(volume_cfg_path, volume_tmp_cfg_path)
    assert os.path.exists(volume_tmp_cfg_path)

    volume_info = cmd.backup_volume_list(grpc_controller_client.address,
                                         "", backup_target)
    assert MESSAGE_TYPE_ERROR in volume_info[VOLUME_NAME]["Messages"]

    os.rename(volume_tmp_cfg_path, volume_cfg_path)
    assert os.path.exists(volume_cfg_path)

    volume_info = cmd.backup_volume_list(grpc_controller_client.address,
                                         "", backup_target)
    assert volume_info[VOLUME_NAME]["Messages"] is not None
    assert MESSAGE_TYPE_ERROR not in volume_info[VOLUME_NAME]["Messages"]

    # backup doesn't exists so it should error
    with pytest.raises(subprocess.CalledProcessError):
        url = backup_target + "?backup=backup-unk" + "&volume=" + VOLUME_NAME
        cmd.backup_inspect(grpc_controller_client.address, url)

    # this returns unsupported driver since `bad` is not a known scheme
    with pytest.raises(subprocess.CalledProcessError):
        cmd.backup_inspect(grpc_controller_client.address, "bad://xxx")

    # restores of both backups should succeed
    reset_volume(grpc_controller_client,
                 grpc_replica_client, grpc_replica_client2)
    restore_with_frontend(grpc_controller_client.address,
                          ENGINE_NAME, backup1)
    restore_with_frontend(grpc_controller_client.address,
                          ENGINE_NAME, backup2)

    # remove backups + volume
    cmd.backup_rm(grpc_controller_client.address, backup1)
    cmd.backup_rm(grpc_controller_client.address, backup2)
    cmd.backup_volume_rm(grpc_controller_client.address,
                         VOLUME_NAME, backup_target)
    assert os.path.exists(BACKUP_DIR)
    assert not os.path.exists(volume_cfg_path)
def backup_hole_with_backing_file_test(
        backup_target,  # NOQA
        grpc_backing_controller,  # NOQA
        grpc_backing_replica1,  # NOQA
        grpc_backing_replica2):  # NOQA
    """Backing-image variant: back up/restore sparse ("hole") and
    block-boundary regions of a volume that uses a backing image, and
    verify both restores reproduce the exact data and checksums.

    NOTE(review): this redefines a function name declared earlier in the
    file; this later binding is the one callers actually get.
    """
    address = grpc_backing_controller.address

    dev = get_dev(grpc_backing_replica1,
                  grpc_backing_replica2,
                  grpc_backing_controller)

    volume_name = grpc_backing_controller.volume_get().name
    assert volume_name == VOLUME_BACKING_NAME

    offset1 = 512
    length1 = 256

    offset2 = 640
    length2 = 256

    boundary_offset = 0
    boundary_length = 4100  # just pass 4096 into next 4k
    hole_offset = 2 * 1024 * 1024
    hole_length = 1024

    # First write + snapshot; record boundary/hole content at backup1 time.
    snap1_data = random_string(length1)
    verify_data(dev, offset1, snap1_data)
    snap1_checksum = checksum_dev(dev)
    snap1 = cmd.snapshot_create(address)

    boundary_data_backup1 = read_dev(dev, boundary_offset, boundary_length)
    hole_data_backup1 = read_dev(dev, hole_offset, hole_length)
    backup1_info = create_backup(address, snap1, backup_target,
                                 backing_image_name=BACKING_IMAGE_NAME,
                                 backing_image_url=BACKING_IMAGE_URL)

    # Second write + snapshot; record the same regions at backup2 time.
    snap2_data = random_string(length2)
    verify_data(dev, offset2, snap2_data)
    snap2_checksum = checksum_dev(dev)
    snap2 = cmd.snapshot_create(address)

    boundary_data_backup2 = read_dev(dev, boundary_offset, boundary_length)
    hole_data_backup2 = read_dev(dev, hole_offset, hole_length)
    backup2_info = create_backup(address, snap2, backup_target,
                                 backing_image_name=BACKING_IMAGE_NAME,
                                 backing_image_url=BACKING_IMAGE_URL)
    # The backup must carry the backing-image metadata it was created with.
    assert backup2_info["VolumeBackingImageName"] == BACKING_IMAGE_NAME
    assert backup2_info["VolumeBackingImageURL"] == BACKING_IMAGE_URL

    # Restore backup1 onto a freshly reset volume and verify.
    reset_volume(grpc_backing_controller,
                 grpc_backing_replica1,
                 grpc_backing_replica2)
    dev = get_blockdev(volume_name)
    restore_with_frontend(address, ENGINE_BACKING_NAME, backup1_info["URL"])
    readed = read_dev(dev, boundary_offset, boundary_length)
    assert readed == boundary_data_backup1
    readed = read_dev(dev, hole_offset, hole_length)
    assert readed == hole_data_backup1
    c = checksum_dev(dev)
    assert c == snap1_checksum

    # Restore backup2 onto a freshly reset volume and verify.
    reset_volume(grpc_backing_controller,
                 grpc_backing_replica1,
                 grpc_backing_replica2)
    dev = get_blockdev(volume_name)
    restore_with_frontend(address, ENGINE_BACKING_NAME, backup2_info["URL"])
    readed = read_dev(dev, boundary_offset, boundary_length)
    assert readed == boundary_data_backup2
    readed = read_dev(dev, hole_offset, hole_length)
    assert readed == hole_data_backup2
    c = checksum_dev(dev)
    assert c == snap2_checksum