def test_inc_restore_with_rebuild_and_expansion(  # NOQA
        grpc_controller, grpc_replica1, grpc_replica2,
        grpc_controller_no_frontend,
        grpc_fixed_dir_replica1, grpc_fixed_dir_replica2,
        backup_targets):  # NOQA
    """
    Incremental restore into a DR (no-frontend) volume combined with
    volume expansion and replica rebuild.

    Flow demonstrated by the calls below:
    1. Back up snap0 from the source volume, restore it into the DR volume.
    2. Expand the source volume, take backup1 at the expanded size.
    3. Restoring backup1 into the still-small DR volume must fail with
       "need to expand the DR volume" and must NOT dirty the restore status.
    4. Expand the DR volume; the next restore attempt triggers a snapshot
       purge first, then a plain retry succeeds.
    5. Add a rebuilding replica: `add-replica` fails until the replica is
       manually expanded, then rebuild + verify completes and data matches.
    """
    # Pick up a random backup target.
    backup_target = backup_targets[random.randint(0, 1)]

    address = grpc_controller.address
    dr_address = grpc_controller_no_frontend.address

    # Best-effort cleanup of leftovers from previous runs; the volume may
    # legitimately not exist yet.
    try:
        cmd.backup_volume_rm(address, VOLUME_NAME, backup_target)
    except Exception:
        pass

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
    start_no_frontend_volume(grpc_controller_no_frontend,
                             grpc_fixed_dir_replica1)

    # Write a random chunk within the original volume size and snapshot it.
    data0_len = random_length(PAGE_SIZE)
    data0 = Data(random.randrange(0, SIZE-2*PAGE_SIZE, PAGE_SIZE),
                 data0_len, random_string(data0_len))
    snap0 = Snapshot(dev, data0, address)

    backup0_info = create_backup(address, snap0.name, backup_target)
    assert backup0_info["VolumeName"] == VOLUME_NAME
    assert backup0_info["Size"] == str(BLOCK_SIZE)

    cmd.backup_restore(dr_address, backup0_info["URL"])
    wait_for_restore_completion(dr_address, backup0_info["URL"])
    verify_no_frontend_data(data0.offset, data0.content,
                            grpc_controller_no_frontend)

    expand_volume_with_frontend(grpc_controller, EXPANDED_SIZE)
    wait_and_check_volume_expansion(grpc_controller, EXPANDED_SIZE)

    # Write into the newly expanded region only (offset >= SIZE).
    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(SIZE, EXPANDED_SIZE-PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)

    backup1_info = create_backup(address, snap1.name, backup_target,
                                 EXPANDED_SIZE_STR)
    assert backup1_info["VolumeName"] == VOLUME_NAME
    assert backup1_info["Size"] == str(2*BLOCK_SIZE)

    backup_volumes = cmd.backup_volume_list(address, VOLUME_NAME,
                                            backup_target)
    assert VOLUME_NAME in backup_volumes
    assert backup_volumes[VOLUME_NAME]["Size"] == EXPANDED_SIZE_STR

    # restore command invocation should error out
    with pytest.raises(subprocess.CalledProcessError) as e:
        cmd.backup_restore(dr_address, backup1_info["URL"])
    assert "need to expand the DR volume" in e.value.stdout

    # The above restore error is triggered before calling the replicas.
    # Hence the error won't be recorded in the restore status
    # and we can continue restoring backups for the DR volume.
    rs = cmd.restore_status(dr_address)
    for status in rs.values():
        assert status['backupURL'] == backup0_info["URL"]
        assert status['lastRestored'] == backup0_info["Name"]
        assert 'error' not in status.keys()
        assert not status["isRestoring"]

    grpc_controller_no_frontend.volume_expand(EXPANDED_SIZE)
    wait_for_volume_expansion(grpc_controller_no_frontend, EXPANDED_SIZE)

    # This restore command will trigger snapshot purge.
    # And the error is triggered before calling the replicas.
    with pytest.raises(subprocess.CalledProcessError) as e:
        cmd.backup_restore(dr_address, backup1_info["URL"])
    assert "found more than 1 snapshot in the replicas, " \
           "hence started to purge snapshots before the restore" \
           in e.value.stdout
    wait_for_purge_completion(dr_address)

    # After the purge only the expansion snapshot and volume-head remain.
    snaps_info = cmd.snapshot_info(dr_address)
    assert len(snaps_info) == 2
    volume_head_name = "volume-head"
    snap_name = "expand-" + EXPANDED_SIZE_STR
    head_info = snaps_info[volume_head_name]
    assert head_info["name"] == volume_head_name
    assert head_info["parent"] == snap_name
    assert not head_info["children"]
    assert head_info["usercreated"] is False

    snap_info = snaps_info[snap_name]
    assert snap_info["name"] == snap_name
    assert not snap_info["parent"]
    assert volume_head_name in snap_info["children"]
    assert snap_info["usercreated"] is False

    cmd.backup_restore(dr_address, backup1_info["URL"])
    wait_for_restore_completion(dr_address, backup1_info["URL"])
    verify_no_frontend_data(data1.offset, data1.content,
                            grpc_controller_no_frontend)

    # For DR volume, the rebuilding replica won't be expanded automatically.
    open_replica(grpc_fixed_dir_replica2)
    with pytest.raises(subprocess.CalledProcessError):
        cmd.add_replica(dr_address, grpc_fixed_dir_replica2.url, True)

    # Manually expand the rebuilding replica then retry `add-replica`.
    grpc_fixed_dir_replica2.replica_open()
    grpc_fixed_dir_replica2.replica_expand(EXPANDED_SIZE)
    grpc_fixed_dir_replica2.replica_close()
    cmd.add_replica(dr_address, grpc_fixed_dir_replica2.url, True)

    # Exactly one healthy (RW) and one rebuilding (WO) replica expected.
    replicas = grpc_controller_no_frontend.replica_list()
    assert len(replicas) == 2
    rw_replica, wo_replica = 0, 0
    for r in replicas:
        if r.mode == 'RW':
            rw_replica += 1
        else:
            assert r.mode == "WO"
            wo_replica += 1
    assert rw_replica == 1 and wo_replica == 1

    # The old replica will fail the restore but the error won't be recorded.
    # Then rebuilding replica will start full restore.
    with pytest.raises(subprocess.CalledProcessError) as e:
        cmd.backup_restore(dr_address, backup1_info["URL"])
    assert "already restored backup" in e.value.stdout
    wait_for_restore_completion(dr_address, backup1_info["URL"])

    cmd.verify_rebuild_replica(dr_address, grpc_fixed_dir_replica2.url)
    replicas = grpc_controller_no_frontend.replica_list()
    assert len(replicas) == 2
    for r in replicas:
        assert r.mode == 'RW'

    verify_no_frontend_data(data1.offset, data1.content,
                            grpc_controller_no_frontend)

    cmd.backup_volume_rm(grpc_controller.address, VOLUME_NAME, backup_target)
def test_restore_with_rebuild(  # NOQA
        grpc_controller, grpc_replica1, grpc_replica2,
        grpc_controller_no_frontend,
        grpc_fixed_dir_replica1, grpc_fixed_dir_replica2,
        backup_targets):  # NOQA
    """
    Restore a backup into a DR (no-frontend) volume, then rebuild a second
    replica for that volume and verify the rebuilt replica alone still
    serves the restored data after the original replica is removed.
    """
    # Pick up a random backup target.
    backup_target = backup_targets[random.randint(0, 1)]

    address = grpc_controller.address
    dr_address = grpc_controller_no_frontend.address

    # Best-effort cleanup of leftovers from previous runs; the volume may
    # legitimately not exist yet.
    try:
        cmd.backup_volume_rm(address, VOLUME_NAME, backup_target)
    except Exception:
        pass

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
    start_no_frontend_volume(grpc_controller_no_frontend,
                             grpc_fixed_dir_replica1)

    # Write a random chunk into the source volume and snapshot it.
    data0_len = random_length(PAGE_SIZE)
    data0 = Data(random.randrange(0, SIZE-2*PAGE_SIZE, PAGE_SIZE),
                 data0_len, random_string(data0_len))
    snap0 = Snapshot(dev, data0, address)

    backup0_info = create_backup(address, snap0.name, backup_target)
    assert backup0_info["VolumeName"] == VOLUME_NAME
    assert backup0_info["Size"] == str(BLOCK_SIZE)

    cmd.backup_restore(dr_address, backup0_info["URL"])
    wait_for_restore_completion(dr_address, backup0_info["URL"])
    verify_no_frontend_data(data0.offset, data0.content,
                            grpc_controller_no_frontend)

    # Attach a second replica in rebuilding mode.
    open_replica(grpc_fixed_dir_replica2)
    cmd.add_replica(dr_address, grpc_fixed_dir_replica2.url, True)

    # Exactly one healthy (RW) and one rebuilding (WO) replica expected.
    replicas = grpc_controller_no_frontend.replica_list()
    assert len(replicas) == 2
    rw_replica, wo_replica = 0, 0
    for r in replicas:
        if r.mode == 'RW':
            rw_replica += 1
        else:
            assert r.mode == "WO"
            wo_replica += 1
    assert rw_replica == 1 and wo_replica == 1

    # The old replica will fail the restore but the error won't be recorded.
    # Then rebuilding replica will start full restore.
    with pytest.raises(subprocess.CalledProcessError) as e:
        cmd.backup_restore(dr_address, backup0_info["URL"])
    assert "already restored backup" in e.value.stdout
    wait_for_restore_completion(dr_address, backup0_info["URL"])

    # Need to manually verify the rebuilding replica for the restore volume
    cmd.verify_rebuild_replica(dr_address, grpc_fixed_dir_replica2.url)
    replicas = grpc_controller_no_frontend.replica_list()
    assert len(replicas) == 2
    for r in replicas:
        assert r.mode == 'RW'

    # Delete the old replica then check if the rebuilt replica works fine.
    cleanup_replica(grpc_fixed_dir_replica1)
    grpc_controller_no_frontend.replica_delete(grpc_fixed_dir_replica1.address)

    verify_no_frontend_data(data0.offset, data0.content,
                            grpc_controller_no_frontend)

    cmd.backup_volume_rm(grpc_controller.address, VOLUME_NAME, backup_target)
def volume_expansion_with_backup_test(
        grpc_engine_manager,  # NOQA
        grpc_controller,  # NOQA
        grpc_dr_controller,  # NOQA
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_dr_replica1,  # NOQA
        grpc_dr_replica2,  # NOQA
        volume_name, engine_name, backup_target):  # NOQA
    """
    Exercise volume expansion combined with backup/restore on a DR volume.

    Takes backup0 at the original size, restores it into the DR volume,
    expands the source volume, takes backup1 at the expanded size, then
    restores backup1 incrementally — which implicitly expands the DR
    volume — and verifies the DR block device size and data.

    NOTE(review): `engine_name` is not referenced in this body — presumably
    consumed by fixtures/callers; confirm before removing.
    """
    address = grpc_controller.address
    dr_address = grpc_dr_controller.address
    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
    start_no_frontend_volume(grpc_engine_manager,
                             grpc_dr_controller,
                             grpc_dr_replica1, grpc_dr_replica2)

    # Best-effort cleanup of leftovers from previous runs; the volume may
    # legitimately not exist yet.
    try:
        cmd.backup_volume_rm(address, volume_name, backup_target)
    except Exception:
        pass

    # Write a random chunk within the original volume size and snapshot it.
    data0_len = random_length(PAGE_SIZE)
    data0 = Data(random.randrange(0, SIZE - 2 * PAGE_SIZE, PAGE_SIZE),
                 data0_len, random_string(data0_len))
    snap0 = Snapshot(dev, data0, address)

    backup0_info = create_backup(address, snap0.name, backup_target)
    assert backup0_info["VolumeName"] == volume_name
    assert backup0_info["Size"] == str(BLOCK_SIZE)

    cmd.backup_restore(dr_address, backup0_info["URL"])
    wait_for_restore_completion(dr_address, backup0_info["URL"])
    verify_no_frontend_data(grpc_engine_manager,
                            data0.offset, data0.content,
                            grpc_dr_controller)

    expand_volume_with_frontend(grpc_controller, EXPANDED_SIZE)
    wait_and_check_volume_expansion(grpc_controller, EXPANDED_SIZE)

    # Write into the newly expanded region only (offset >= SIZE).
    data1_len = random_length(PAGE_SIZE)
    data1 = Data(random.randrange(SIZE, EXPANDED_SIZE - PAGE_SIZE, PAGE_SIZE),
                 data1_len, random_string(data1_len))
    snap1 = Snapshot(dev, data1, address)

    backup1_info = create_backup(address, snap1.name, backup_target,
                                 EXPANDED_SIZE_STR)
    assert backup1_info["VolumeName"] == volume_name
    assert backup1_info["Size"] == str(2 * BLOCK_SIZE)

    backup_volumes = cmd.backup_volume_list(address, volume_name,
                                            backup_target)
    assert volume_name in backup_volumes
    assert backup_volumes[volume_name]["Size"] == EXPANDED_SIZE_STR

    # incremental restoration will implicitly expand the volume first
    restore_incrementally(dr_address, backup1_info["URL"],
                          backup0_info["Name"])
    check_dr_volume_block_device_size(grpc_engine_manager,
                                      grpc_dr_controller, EXPANDED_SIZE)
    verify_no_frontend_data(grpc_engine_manager,
                            data1.offset, data1.content,
                            grpc_dr_controller)

    cmd.backup_volume_rm(grpc_controller.address, volume_name, backup_target)
def test_backup_volume_list(  # NOQA
        grpc_replica_client, grpc_controller_client,  # NOQA
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_controller, backup_targets):  # NOQA
    """
    Test backup volume list

    Context:

    We want to make sure that an error when listing a single backup volume
    does not stop us from listing all the other backup volumes. Otherwise
    a single faulty backup can block the retrieval of all known backup
    volumes.

    Steps:

    1.  Create a volume(1) and attach to the current node
    2.  Create a volume(2) and attach to the current node
    3.  write some data to volume(1) & volume(2)
    4.  Create a backup of volume(1) & volume(2)
    5.  request a backup list
    6.  verify backup list contains no error messages for volume(1)
    7.  verify backup list contains no error messages for volume(2)
    8.  place a file named "*****@*****.**"
        into the backups folder of volume(1)
    9.  request a backup list
    10. verify backup list contains `Invalid name` error messages for
        volume(1)
    11. verify backup list contains no error messages for volume(2)
    12. delete backup volumes(1 & 2)
    13. cleanup
    """
    # create a second volume
    grpc2_replica1 = grpc_replica_client(REPLICA_2_NAME + "-1")
    grpc2_replica2 = grpc_replica_client(REPLICA_2_NAME + "-2")
    grpc2_controller = grpc_controller_client(ENGINE2_NAME, VOLUME2_NAME)

    offset = 0
    length = 128
    address = grpc_controller.address
    address2 = grpc2_controller.address

    for backup_target in backup_targets:
        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
        dev2 = get_dev(grpc2_replica1, grpc2_replica2, grpc2_controller)

        # create a regular backup
        snap_data = random_string(length)
        verify_data(dev, offset, snap_data)
        snap = cmd.snapshot_create(address)
        backup_info = create_backup(address, snap, backup_target)
        assert backup_info["VolumeName"] == VOLUME_NAME
        assert backup_info["Size"] == BLOCK_SIZE_STR
        assert snap in backup_info["SnapshotName"]

        # create a regular backup on volume 2
        verify_data(dev2, offset, random_string(length))
        snap = cmd.snapshot_create(address2)
        backup_info = create_backup(address2, snap, backup_target)
        assert backup_info["VolumeName"] == VOLUME2_NAME
        assert backup_info["Size"] == BLOCK_SIZE_STR
        assert snap in backup_info["SnapshotName"]

        # request a volume list
        info = cmd.backup_volume_list(address, "", backup_target)
        assert info[VOLUME_NAME]["Name"] == VOLUME_NAME
        assert MESSAGE_TYPE_ERROR not in info[VOLUME_NAME]["Messages"]
        assert info[VOLUME2_NAME]["Name"] == VOLUME2_NAME
        assert MESSAGE_TYPE_ERROR not in info[VOLUME2_NAME]["Messages"]

        # place badly named backup.cfg file
        # we want the list call to return correctly and
        # include an error message otherwise a single volume error
        # can stop all backup volumes from showing up
        backup_dir = os.path.join(finddir(BACKUP_DIR, VOLUME_NAME), "backups")
        # Create the empty bogus cfg via a context manager so the handle is
        # always closed, even if the test fails mid-way.
        with open(os.path.join(backup_dir, "*****@*****.**"), "w"):
            pass
        info = cmd.backup_volume_list(address, "", backup_target,
                                      include_backup_details=True)
        assert "Invalid name" in info[VOLUME_NAME]["Messages"]["error"]
        assert info[VOLUME2_NAME]["Name"] == VOLUME2_NAME
        assert MESSAGE_TYPE_ERROR not in info[VOLUME2_NAME]["Messages"]

        # remove the volume with the badly named backup.cfg
        cmd.backup_volume_rm(address, VOLUME_NAME, backup_target)
        info = cmd.backup_volume_list(address, VOLUME_NAME, backup_target,
                                      include_backup_details=True)
        assert "cannot find" in info[VOLUME_NAME]["Messages"]["error"]

        # remove volume 2 backups
        cmd.backup_volume_rm(address, VOLUME2_NAME, backup_target)
        info = cmd.backup_volume_list(address, VOLUME2_NAME, backup_target,
                                      include_backup_details=True)
        assert "cannot find" in info[VOLUME2_NAME]["Messages"]["error"]

        # cleanup volume 1
        cmd.sync_agent_server_reset(address)
        cleanup_controller(grpc_controller)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)

        # cleanup volume 2
        cmd.sync_agent_server_reset(address2)
        cleanup_controller(grpc2_controller)
        cleanup_replica(grpc2_replica1)
        cleanup_replica(grpc2_replica2)
def test_backup_block_no_cleanup(  # NOQA
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_controller, backup_targets):  # NOQA
    """
    Verify that an in-progress backup marker blocks garbage collection:
    deleting a backup while another backup is flagged as in progress must
    keep all blocks on disk, and deleting the final backup after the
    marker is removed must reclaim every block (including the ones
    orphaned by the earlier deletion).
    """
    ctl_address = grpc_controller.address
    payload_len = 128

    for target in backup_targets:
        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

        # Fill two distinct backup blocks with random payloads.
        verify_data(dev, 0, random_string(payload_len))
        verify_data(dev, BLOCK_SIZE, random_string(payload_len))

        snap_name = cmd.snapshot_create(ctl_address)
        first_backup = create_backup(ctl_address, snap_name, target)
        assert first_backup["VolumeName"] == VOLUME_NAME
        assert first_backup["Size"] == str(BLOCK_SIZE * 2)
        assert snap_name in first_backup["SnapshotName"]
        check_backup_volume_block_count(ctl_address, VOLUME_NAME, target, 2)

        # Rewrite the second block so the next backup diverges there.
        verify_data(dev, BLOCK_SIZE, random_string(payload_len))
        snap_name = cmd.snapshot_create(ctl_address)
        second_backup = create_backup(ctl_address, snap_name, target)
        assert second_backup["VolumeName"] == VOLUME_NAME
        assert second_backup["Size"] == str(BLOCK_SIZE * 2)
        assert snap_name in second_backup["SnapshotName"]

        # Both backups share the first block but carry different second
        # blocks, so three blocks exist in total.
        check_backup_volume_block_count(ctl_address, VOLUME_NAME, target, 3)

        # Fake an in-progress backup so GC must keep all blocks alive.
        pending_backup_path = create_in_progress_backup_file(VOLUME_NAME)

        # Deleting the first backup must not reclaim any block now.
        cmd.backup_rm(ctl_address, first_backup["URL"])
        check_backup_volume_block_count(ctl_address, VOLUME_NAME, target, 3)

        # Drop the in-progress marker, then delete the last backup:
        # everything goes away, including the first backup's orphaned block.
        os.remove(pending_backup_path)
        cmd.backup_rm(ctl_address, second_backup["URL"])
        check_backup_volume_block_count(ctl_address, VOLUME_NAME, target, 0)

        # Tear down the backup volume and confirm it is gone.
        cmd.backup_volume_rm(ctl_address, VOLUME_NAME, target)
        listing = cmd.backup_volume_list(ctl_address, VOLUME_NAME,
                                         target)[VOLUME_NAME]
        assert "cannot find" in listing["Messages"]["error"]

        # Reset engine state for the next backup target.
        cmd.sync_agent_server_reset(ctl_address)
        cleanup_controller(grpc_controller)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
def test_backup_block_deletion(  # NOQA
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_controller, backup_targets):  # NOQA
    """
    Verify backup block reference counting: duplicated backups share
    blocks, deleting one reference keeps shared blocks alive, and deleting
    the last backup removes every block.
    """
    address = grpc_controller.address
    length = 128

    for backup_target in backup_targets:
        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

        # write two backup block
        verify_data(dev, 0, random_string(length))
        verify_data(dev, BLOCK_SIZE, random_string(length))
        snap = cmd.snapshot_create(address)

        backup1 = create_backup(address, snap, backup_target)
        assert backup1["VolumeName"] == VOLUME_NAME
        assert backup1["Size"] == str(BLOCK_SIZE * 2)
        assert snap in backup1["SnapshotName"]

        # test block deduplication
        backup1_duplicate = create_backup(address, snap, backup_target)
        assert backup1_duplicate["VolumeName"] == VOLUME_NAME
        assert backup1_duplicate["Size"] == str(BLOCK_SIZE * 2)
        assert snap in backup1_duplicate["SnapshotName"]
        check_backup_volume_block_count(address, VOLUME_NAME,
                                        backup_target, 2)

        # overwrite second backup block
        verify_data(dev, BLOCK_SIZE, random_string(length))
        snap = cmd.snapshot_create(address)
        backup2 = create_backup(address, snap, backup_target)
        assert backup2["VolumeName"] == VOLUME_NAME
        assert backup2["Size"] == str(BLOCK_SIZE * 2)
        assert snap in backup2["SnapshotName"]

        # check that the volume now has 3 blocks
        # backup1 and backup2 share the first block
        # and have different second blocks
        check_backup_volume_block_count(address, VOLUME_NAME,
                                        backup_target, 3)

        # remove backup 1 duplicate
        # this should not change the blocks on disk
        # since all blocks are still required
        cmd.backup_rm(address, backup1_duplicate["URL"])
        check_backup_volume_block_count(address, VOLUME_NAME,
                                        backup_target, 3)

        # remove backup 1
        # the volume should now have 2 blocks
        # blk1 from backup1 should still be present
        # since it's required by backup 2
        cmd.backup_rm(address, backup1["URL"])
        check_backup_volume_block_count(address, VOLUME_NAME,
                                        backup_target, 2)

        # remove the last remaining backup 2
        # this should remove all blocks
        cmd.backup_rm(address, backup2["URL"])
        check_backup_volume_block_count(address, VOLUME_NAME,
                                        backup_target, 0)

        # cleanup the backup volume
        cmd.backup_volume_rm(address, VOLUME_NAME, backup_target)
        info = cmd.backup_volume_list(address, VOLUME_NAME,
                                      backup_target)[VOLUME_NAME]
        assert "cannot find" in info["Messages"]["error"]

        # Reset engine state before moving to the next backup target.
        cmd.sync_agent_server_reset(address)
        cleanup_controller(grpc_controller)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
def backup_core(
        bin,  # NOQA  -- shadows the builtin; kept for caller compatibility
        engine_manager_client,  # NOQA
        grpc_controller_client,  # NOQA
        grpc_replica_client,  # NOQA
        grpc_replica_client2,  # NOQA
        backup_target):
    """
    Core backup workflow shared by the backup tests.

    Creates two backups (full + incremental) on a two-replica volume,
    validates their metadata and listing, exercises the error paths for a
    corrupted backup.cfg and a missing volume.cfg, verifies inspect fails
    for unknown backups and unsupported schemes, restores both backups,
    and finally removes all backups and the backup volume.
    """
    open_replica(grpc_replica_client)
    open_replica(grpc_replica_client2)

    r1_url = grpc_replica_client.url
    r2_url = grpc_replica_client2.url
    v = grpc_controller_client.volume_start(replicas=[
        r1_url,
        r2_url,
    ])
    assert v.replicaCount == 2

    backup_type = urlparse(backup_target).scheme

    # create & process backup1
    snapshot1 = cmd.snapshot_create(grpc_controller_client.address)
    output = grpc_replica_client.replica_get().chain[1]
    assert output == 'volume-snap-{}.img'.format(snapshot1)
    backup1 = cmd.backup_create(grpc_controller_client.address,
                                snapshot1, backup_target,
                                {'name': 'backup1', 'type': backup_type})
    backup1_info = cmd.backup_inspect(grpc_controller_client.address, backup1)
    assert backup1_info["URL"] == backup1
    assert backup1_info["IsIncremental"] is False
    assert backup1_info["VolumeName"] == VOLUME_NAME
    assert backup1_info["VolumeSize"] == SIZE_STR
    assert backup1_info["SnapshotName"] == snapshot1
    assert len(backup1_info["Labels"]) == 2
    assert backup1_info["Labels"]["name"] == "backup1"
    assert backup1_info["Labels"]["type"] == backup_type

    # create & process backup2 (no labels -> incremental on top of backup1)
    snapshot2 = cmd.snapshot_create(grpc_controller_client.address)
    output = grpc_replica_client.replica_get().chain[1]
    assert output == 'volume-snap-{}.img'.format(snapshot2)
    backup2 = cmd.backup_create(grpc_controller_client.address,
                                snapshot2, backup_target)
    backup2_info = cmd.backup_inspect(grpc_controller_client.address, backup2)
    assert backup2_info["URL"] == backup2
    assert backup2_info["IsIncremental"] is True
    assert backup2_info["VolumeName"] == VOLUME_NAME
    assert backup2_info["VolumeSize"] == SIZE_STR
    assert backup2_info["SnapshotName"] == snapshot2
    if backup2_info["Labels"] is not None:
        assert len(backup2_info["Labels"]) == 0

    # list all known backups for volume
    volume_info = cmd.backup_volume_list(
        grpc_controller_client.address, VOLUME_NAME,
        backup_target, include_backup_details=True)[VOLUME_NAME]
    assert volume_info["Name"] == VOLUME_NAME
    assert volume_info["Size"] == SIZE_STR
    backup_list = volume_info["Backups"]
    assert backup_list[backup1]["URL"] == backup1_info["URL"]
    assert backup_list[backup1]["SnapshotName"] == \
        backup1_info["SnapshotName"]
    assert backup_list[backup1]["Size"] == backup1_info["Size"]
    assert backup_list[backup1]["Created"] == backup1_info["Created"]
    assert backup_list[backup1]["Messages"] is None
    assert backup_list[backup2]["URL"] == backup2_info["URL"]
    assert backup_list[backup2]["SnapshotName"] == \
        backup2_info["SnapshotName"]
    assert backup_list[backup2]["Size"] == backup2_info["Size"]
    assert backup_list[backup2]["Created"] == backup2_info["Created"]
    assert backup_list[backup2]["Messages"] is None

    # test that corrupt backups are signaled during a list operation
    # https://github.com/longhorn/longhorn/issues/1212
    volume_dir = finddir(BACKUP_DIR, VOLUME_NAME)
    assert volume_dir
    assert os.path.exists(volume_dir)
    backup_dir = os.path.join(volume_dir, "backups")
    assert os.path.exists(backup_dir)
    backup_cfg_name = "backup_" + backup2_info["Name"] + ".cfg"
    assert backup_cfg_name
    backup_cfg_path = findfile(backup_dir, backup_cfg_name)
    assert os.path.exists(backup_cfg_path)
    # Move the real cfg aside, then write deliberately invalid JSON in its
    # place; the context manager guarantees the handle is closed even if
    # the write assertion fails.
    backup_tmp_cfg_path = os.path.join(volume_dir, backup_cfg_name)
    os.rename(backup_cfg_path, backup_tmp_cfg_path)
    assert os.path.exists(backup_tmp_cfg_path)
    with open(backup_cfg_path, "w") as corrupt_backup:
        assert corrupt_backup.write("{corrupt: definitely") > 0

    # request the new backup list
    volume_info = cmd.backup_volume_list(
        grpc_controller_client.address, VOLUME_NAME,
        backup_target, include_backup_details=True)[VOLUME_NAME]
    assert volume_info["Name"] == VOLUME_NAME
    backup_list = volume_info["Backups"]
    assert backup_list[backup1]["URL"] == backup1_info["URL"]
    assert backup_list[backup1]["Messages"] is None
    assert backup_list[backup2]["URL"] == backup2_info["URL"]
    assert MESSAGE_TYPE_ERROR in backup_list[backup2]["Messages"]

    # we still want to fail inspects, since they operate on urls
    # with no guarantee of backup existence
    with pytest.raises(subprocess.CalledProcessError):
        cmd.backup_inspect(grpc_controller_client.address, backup2)

    # switch back to valid cfg
    os.rename(backup_tmp_cfg_path, backup_cfg_path)
    assert cmd.backup_inspect(grpc_controller_client.address, backup2)

    # test that list returns a volume_info with an error message
    # for a missing volume.cfg instead of failing with an error
    # https://github.com/rancher/longhorn/issues/399
    volume_cfg_path = findfile(volume_dir, VOLUME_CONFIG_FILE)
    assert os.path.exists(volume_cfg_path)
    volume_tmp_cfg_path = volume_cfg_path.replace(VOLUME_CONFIG_FILE,
                                                  VOLUME_TMP_CONFIG_FILE)
    os.rename(volume_cfg_path, volume_tmp_cfg_path)
    assert os.path.exists(volume_tmp_cfg_path)

    volume_info = cmd.backup_volume_list(grpc_controller_client.address,
                                         "", backup_target)
    assert MESSAGE_TYPE_ERROR in volume_info[VOLUME_NAME]["Messages"]

    os.rename(volume_tmp_cfg_path, volume_cfg_path)
    assert os.path.exists(volume_cfg_path)
    volume_info = cmd.backup_volume_list(grpc_controller_client.address,
                                         "", backup_target)
    assert volume_info[VOLUME_NAME]["Messages"] is not None
    assert MESSAGE_TYPE_ERROR not in volume_info[VOLUME_NAME]["Messages"]

    # backup doesn't exists so it should error
    with pytest.raises(subprocess.CalledProcessError):
        url = backup_target + "?backup=backup-unk" + "&volume=" + VOLUME_NAME
        cmd.backup_inspect(grpc_controller_client.address, url)

    # this returns unsupported driver since `bad` is not a known scheme
    with pytest.raises(subprocess.CalledProcessError):
        cmd.backup_inspect(grpc_controller_client.address, "bad://xxx")

    reset_volume(grpc_controller_client,
                 grpc_replica_client, grpc_replica_client2)
    restore_with_frontend(grpc_controller_client.address,
                          ENGINE_NAME, backup1)
    restore_with_frontend(grpc_controller_client.address,
                          ENGINE_NAME, backup2)

    # remove backups + volume
    cmd.backup_rm(grpc_controller_client.address, backup1)
    cmd.backup_rm(grpc_controller_client.address, backup2)
    cmd.backup_volume_rm(grpc_controller_client.address,
                         VOLUME_NAME, backup_target)
    assert os.path.exists(BACKUP_DIR)
    assert not os.path.exists(volume_cfg_path)