def test_backup_volume_deletion(grpc_replica1, grpc_replica2,  # NOQA
                                grpc_controller, backup_targets):  # NOQA
    offset = 0
    length = 128
    address = grpc_controller.address

    for backup_target in backup_targets:
        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
        snap_data = random_string(length)
        verify_data(dev, offset, snap_data)

        snap = cmd.snapshot_create(address)
        backup_info = create_backup(address, snap, backup_target)
        assert backup_info["VolumeName"] == VOLUME_NAME
        assert backup_info["Size"] == BLOCK_SIZE_STR
        assert snap in backup_info["SnapshotName"]

        cmd.backup_volume_rm(address, VOLUME_NAME, backup_target)
        info = cmd.backup_volume_list(address, VOLUME_NAME, backup_target)
        assert "cannot find" in info[VOLUME_NAME]["Messages"]["error"]

        cmd.sync_agent_server_reset(address)
        cleanup_controller(grpc_controller)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
def test_ha_remove_extra_disks(grpc_controller,  # NOQA
                               grpc_replica1, grpc_replica2):  # NOQA
    """
    replica1 gains extra snapshot data, the volume is restarted with
    replica2 only, then replica1 is added back; the rebuild should
    discard replica1's extra disks.
    """
    address = grpc_controller.address

    prepare_backup_dir(BACKUP_DIR)
    open_replica(grpc_replica1)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 0

    r1_url = grpc_replica1.url
    v = grpc_controller.volume_start(replicas=[r1_url])
    assert v.name == VOLUME_NAME
    assert v.replicaCount == 1

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 1
    assert replicas[0].mode == "RW"

    dev = get_blockdev(VOLUME_NAME)

    wasted_data = random_string(128)
    data_offset = 1024
    verify_data(dev, data_offset, wasted_data)

    # now replica1 contains extra data in a snapshot
    cmd.snapshot_create(address)

    cleanup_controller(grpc_controller)

    open_replica(grpc_replica2)
    replicas = grpc_controller.replica_list()
    assert len(replicas) == 0

    r2_url = grpc_replica2.url
    v = grpc_controller.volume_start(replicas=[r2_url])
    assert v.name == VOLUME_NAME
    assert v.replicaCount == 1

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 1
    assert replicas[0].mode == "RW"

    dev = get_blockdev(VOLUME_NAME)

    data = random_string(128)
    data_offset = 1024
    verify_data(dev, data_offset, data)

    r1 = grpc_replica1.replica_reload()
    print(r1)

    cmd.add_replica(address, r1_url)
    wait_for_rebuild_complete(address)

    verify_data(dev, data_offset, data)
def test_snapshot_tree_backup(grpc_replica1, grpc_replica2,  # NOQA
                              grpc_controller, backup_targets):  # NOQA
    for backup_target in backup_targets:
        snapshot_tree_backup_test(backup_target, ENGINE_NAME,
                                  grpc_controller,
                                  grpc_replica1, grpc_replica2)
        cmd.sync_agent_server_reset(grpc_controller.address)
        cleanup_controller(grpc_controller)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
def test_restore_to_file_without_backing_file(backup_targets,  # NOQA
                                              grpc_controller,  # NOQA
                                              grpc_replica1,  # NOQA
                                              grpc_replica2):  # NOQA
    for backup_target in backup_targets:
        restore_to_file_without_backing_file_test(backup_target,
                                                  grpc_controller,
                                                  grpc_replica1,
                                                  grpc_replica2)
        cmd.sync_agent_server_reset(grpc_controller.address)
        cleanup_controller(grpc_controller)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
def test_backup_with_backing_file(grpc_backing_replica1,  # NOQA
                                  grpc_backing_replica2,  # NOQA
                                  grpc_backing_controller,  # NOQA
                                  backup_targets):  # NOQA
    for backup_target in backup_targets:
        backup_with_backing_file_test(backup_target,
                                      grpc_backing_controller,
                                      grpc_backing_replica1,
                                      grpc_backing_replica2)
        cmd.sync_agent_server_reset(grpc_backing_controller.address)
        cleanup_controller(grpc_backing_controller)
        cleanup_replica(grpc_backing_replica1)
        cleanup_replica(grpc_backing_replica2)
def test_backup(grpc_replica1, grpc_replica2,  # NOQA
                grpc_controller, backup_targets):  # NOQA
    for backup_target in backup_targets:
        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)
        backup_test(dev, grpc_controller.address, VOLUME_NAME,
                    ENGINE_NAME, backup_target)
        cmd.sync_agent_server_reset(grpc_controller.address)
        cleanup_controller(grpc_controller)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
def cleanup_no_frontend_volume(grpc_em, grpc_c, grpc_r1, grpc_r2):
    grpc_em.frontend_start(ENGINE_NO_FRONTEND_NAME,
                           FRONTEND_TGT_BLOCKDEV)
    v = grpc_c.volume_get()
    assert v.frontendState == "up"

    cmd.sync_agent_server_reset(grpc_c.address)

    grpc_em.frontend_shutdown(ENGINE_NO_FRONTEND_NAME)
    v = grpc_c.volume_get()
    assert v.frontendState == "down"

    ep = grpc_em.engine_get(ENGINE_NO_FRONTEND_NAME)
    assert ep.spec.frontend == ""

    cleanup_controller(grpc_c)
    cleanup_replica(grpc_r1)
    cleanup_replica(grpc_r2)

    cleanup_replica_dir(FIXED_REPLICA_PATH1)
    cleanup_replica_dir(FIXED_REPLICA_PATH2)
def test_ha_double_replica_rebuild(grpc_controller,  # NOQA
                                   grpc_replica1, grpc_replica2):  # NOQA
    """
    Both replicas fail in turn; after the restart, the stale replica2
    (lower revision counter) is marked ERR, deleted, and rebuilt from
    replica1 until both revision counters are back in sync.
    """
    open_replica(grpc_replica1)
    open_replica(grpc_replica2)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 0

    r1_url = grpc_replica1.url
    r2_url = grpc_replica2.url
    v = grpc_controller.volume_start(replicas=[r1_url, r2_url])
    assert v.name == VOLUME_NAME
    assert v.replicaCount == 2

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 2
    assert replicas[0].mode == "RW"
    assert replicas[1].mode == "RW"

    dev = get_blockdev(VOLUME_NAME)

    data1 = random_string(128)
    data1_offset = 1024
    verify_data(dev, data1_offset, data1)

    # Close replica2
    r2 = grpc_replica2.replica_get()
    assert r2.revisionCounter == 1
    grpc_replica2.replica_close()

    verify_async(dev, 10, 128, 1)

    verify_replica_state(grpc_controller, 1, "ERR")

    verify_read(dev, data1_offset, data1)

    data2 = random_string(128)
    data2_offset = 512
    verify_data(dev, data2_offset, data2)

    # Close replica1
    r1 = grpc_replica1.replica_get()
    assert r1.revisionCounter == 12  # 1 + 10 + 1
    grpc_replica1.replica_close()

    # Restart volume
    cleanup_controller(grpc_controller)

    replicas = grpc_controller.replica_list()
    assert len(replicas) == 0

    # NOTE the order is reversed here
    r1_url = grpc_replica1.url
    r2_url = grpc_replica2.url
    v = grpc_controller.volume_start(replicas=[r2_url, r1_url])
    assert v.replicaCount == 2

    # replica2 is out because of lower revision counter
    replicas = grpc_controller.replica_list()
    assert len(replicas) == 2
    assert replicas[0].mode == "ERR"
    assert replicas[1].mode == "RW"

    verify_read(dev, data1_offset, data1)
    verify_read(dev, data2_offset, data2)

    # Rebuild replica2
    r2 = grpc_replica2.replica_get()
    assert r2.revisionCounter == 1
    grpc_replica2.replica_close()
    grpc_controller.replica_delete(replicas[0].address)

    cmd.add_replica(grpc_controller.address, r2_url)
    wait_for_rebuild_complete(grpc_controller.address)

    verify_async(dev, 10, 128, 1)

    verify_replica_state(grpc_controller, 1, "RW")

    verify_read(dev, data1_offset, data1)
    verify_read(dev, data2_offset, data2)

    r1 = grpc_replica1.replica_get()
    r2 = grpc_replica2.replica_get()
    assert r1.revisionCounter == 22  # 1 + 10 + 1 + 10
    assert r2.revisionCounter == 22  # must be in sync with r1
def restore_inc_test(
        grpc_engine_manager,  # NOQA
        grpc_controller,  # NOQA
        grpc_replica1, grpc_replica2,  # NOQA
        grpc_dr_controller,  # NOQA
        grpc_dr_replica1, grpc_dr_replica2,  # NOQA
        backup_target):  # NOQA
    """
    Incremental restore on a DR (no-frontend) volume: build a chain of
    backups, fully restore backup0, then apply incremental restores while
    exercising a mocked restore failure, a retried restore, a lastRestored
    mismatch, and a removed last-restored backup.
    """
    address = grpc_controller.address

    dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

    zero_string = b'\x00'.decode('utf-8')

    # backup0: 256 random data in 1st block
    length0 = 256
    snap0_data = random_string(length0)
    verify_data(dev, 0, snap0_data)
    verify_data(dev, BLOCK_SIZE, snap0_data)
    snap0 = cmd.snapshot_create(address)
    backup0 = create_backup(address, snap0, backup_target)["URL"]
    backup0_name = cmd.backup_inspect(address, backup0)['Name']

    # backup1: 32 random data + 32 zero data + 192 random data in 1st block
    length1 = 32
    offset1 = 32
    snap1_data = zero_string * length1
    verify_data(dev, offset1, snap1_data)
    snap1 = cmd.snapshot_create(address)
    backup1 = create_backup(address, snap1, backup_target)["URL"]
    backup1_name = cmd.backup_inspect(address, backup1)['Name']

    # backup2: 32 random data + 256 random data in 1st block,
    # 256 random data in 2nd block
    length2 = 256
    offset2 = 32
    snap2_data = random_string(length2)
    verify_data(dev, offset2, snap2_data)
    verify_data(dev, BLOCK_SIZE, snap2_data)
    snap2 = cmd.snapshot_create(address)
    backup2 = create_backup(address, snap2, backup_target)["URL"]
    backup2_name = cmd.backup_inspect(address, backup2)['Name']

    # backup3: 64 zero data + 192 random data in 1st block
    length3 = 64
    offset3 = 0
    verify_data(dev, offset3, zero_string * length3)
    verify_data(dev, length2, zero_string * offset2)
    verify_data(dev, BLOCK_SIZE, zero_string * length2)
    snap3 = cmd.snapshot_create(address)
    backup3 = create_backup(address, snap3, backup_target)["URL"]
    backup3_name = cmd.backup_inspect(address, backup3)['Name']

    # backup4: 256 random data in 1st block
    length4 = 256
    offset4 = 0
    snap4_data = random_string(length4)
    verify_data(dev, offset4, snap4_data)
    snap4 = cmd.snapshot_create(address)
    backup4 = create_backup(address, snap4, backup_target)["URL"]
    backup4_name = cmd.backup_inspect(address, backup4)['Name']

    # start the DR volume (no frontend)
    dr_address = grpc_dr_controller.address
    start_no_frontend_volume(grpc_engine_manager,
                             grpc_dr_controller,
                             grpc_dr_replica1, grpc_dr_replica2)

    cmd.backup_restore(dr_address, backup0)
    wait_for_restore_completion(dr_address, backup0)
    verify_no_frontend_data(grpc_engine_manager,
                            0, snap0_data, grpc_dr_controller)

    # mock restore crash/error
    delta_file1 = "volume-delta-" + backup0_name + ".img"
    if "vfs" in backup_target:
        command = ["find", VFS_DIR, "-type", "d", "-name", VOLUME_NAME]
        backup_volume_path = subprocess.check_output(command).strip()
        command = ["find", backup_volume_path, "-name", "*blk"]
        blocks = subprocess.check_output(command).split()
        assert len(blocks) != 0
        for blk in blocks:
            command = ["mv", blk, blk + ".tmp".encode('utf-8')]
            subprocess.check_output(command).strip()

        # should fail
        is_failed = False
        cmd.restore_inc(dr_address, backup1, backup0_name)
        for i in range(RETRY_COUNTS):
            rs = cmd.restore_status(dr_address)
            for status in rs.values():
                if status['backupURL'] != backup1:
                    break
                if 'error' in status.keys():
                    if status['error'] != "":
                        assert 'no such file or directory' in \
                            status['error']
                        is_failed = True
            if is_failed:
                break
            time.sleep(RETRY_INTERVAL)
        assert is_failed

        assert path.exists(FIXED_REPLICA_PATH1 + delta_file1)
        assert path.exists(FIXED_REPLICA_PATH2 + delta_file1)

        for blk in blocks:
            command = ["mv", blk + ".tmp".encode('utf-8'), blk]
            subprocess.check_output(command)

    data1 = \
        snap0_data[0:offset1] + snap1_data + \
        snap0_data[offset1+length1:]

    # race condition: the last restoration has failed,
    # but `isRestoring` hasn't been cleaned up yet
    for i in range(RETRY_COUNTS):
        try:
            restore_incrementally(dr_address, backup1, backup0_name)
            break
        except subprocess.CalledProcessError as e:
            if "already in progress" not in e.output:
                time.sleep(RETRY_INTERVAL)
            else:
                raise e

    verify_no_frontend_data(grpc_engine_manager,
                            0, data1, grpc_dr_controller)

    assert not path.exists(FIXED_REPLICA_PATH1 + delta_file1)
    assert not path.exists(FIXED_REPLICA_PATH2 + delta_file1)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup1_name)

    data2 = \
        data1[0:offset2] + snap2_data + \
        zero_string * (BLOCK_SIZE - length2 - offset2) + snap2_data
    restore_incrementally(dr_address, backup2, backup1_name)
    verify_no_frontend_data(grpc_engine_manager,
                            0, data2, grpc_dr_controller)

    delta_file2 = "volume-delta-" + backup1_name + ".img"
    assert not path.exists(FIXED_REPLICA_PATH1 + delta_file2)
    assert not path.exists(FIXED_REPLICA_PATH2 + delta_file2)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup2_name)

    # mock race condition
    with pytest.raises(subprocess.CalledProcessError) as e:
        restore_incrementally(dr_address, backup1, backup0_name)
    assert "doesn't match lastRestored" in str(e.value)

    data3 = zero_string * length3 + data2[length3:length2]
    restore_incrementally(dr_address, backup3, backup2_name)
    verify_no_frontend_data(grpc_engine_manager,
                            0, data3, grpc_dr_controller)

    delta_file3 = "volume-delta-" + backup3_name + ".img"
    assert not path.exists(FIXED_REPLICA_PATH1 + delta_file3)
    assert not path.exists(FIXED_REPLICA_PATH2 + delta_file3)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup3_name)

    # mock corner case: invalid last-restored backup
    rm_backups(address, ENGINE_NAME, [backup3])
    # actually it is a full restoration
    restore_incrementally(dr_address, backup4, backup3_name)
    verify_no_frontend_data(grpc_engine_manager,
                            0, snap4_data, grpc_dr_controller)
    status = cmd.restore_status(dr_address)
    compare_last_restored_with_backup(status, backup4_name)

    if "vfs" in backup_target:
        command = ["find", VFS_DIR, "-type", "d", "-name", VOLUME_NAME]
        backup_volume_path = subprocess.check_output(command).strip()
        command = ["find", backup_volume_path, "-name", "*tempoary"]
        tmp_files = subprocess.check_output(command).split()
        assert len(tmp_files) == 0

    cleanup_no_frontend_volume(grpc_engine_manager,
                               grpc_dr_controller,
                               grpc_dr_replica1, grpc_dr_replica2)

    rm_backups(address, ENGINE_NAME,
               [backup0, backup1, backup2, backup4])

    cmd.sync_agent_server_reset(address)
    cleanup_controller(grpc_controller)
    cleanup_replica(grpc_replica1)
    cleanup_replica(grpc_replica2)
def test_backup_type(grpc_replica1, grpc_replica2,  # NOQA
                     grpc_controller, backup_targets):  # NOQA
    """
    Verify backup type detection: the first backup is a full backup,
    later ones are incremental, and removing the latest backup forces
    the next one to be full again.
    """
    for backup_target in backup_targets:
        address = grpc_controller.address
        block_size = 2 * 1024 * 1024

        dev = get_dev(grpc_replica1, grpc_replica2, grpc_controller)

        zero_string = b'\x00'.decode('utf-8')

        # backup0: 256 random data in 1st block
        length0 = 256
        snap0_data = random_string(length0)
        verify_data(dev, 0, snap0_data)
        verify_data(dev, block_size, snap0_data)
        snap0 = cmd.snapshot_create(address)
        backup0 = create_backup(address, snap0, backup_target)
        backup0_url = backup0["URL"]
        assert backup0['IsIncremental'] is False

        # backup1: 32 random data + 32 zero data + 192 random data
        # in 1st block
        length1 = 32
        offset1 = 32
        snap1_data = zero_string * length1
        verify_data(dev, offset1, snap1_data)
        snap1 = cmd.snapshot_create(address)
        backup1 = create_backup(address, snap1, backup_target)
        backup1_url = backup1["URL"]
        assert backup1['IsIncremental'] is True

        # backup2: 32 random data + 256 random data in 1st block,
        # 256 random data in 2nd block
        length2 = 256
        offset2 = 32
        snap2_data = random_string(length2)
        verify_data(dev, offset2, snap2_data)
        verify_data(dev, block_size, snap2_data)
        snap2 = cmd.snapshot_create(address)
        backup2 = create_backup(address, snap2, backup_target)
        backup2_url = backup2["URL"]
        assert backup2['IsIncremental'] is True

        rm_backups(address, ENGINE_NAME, [backup2_url])

        # backup3: 64 zero data + 192 random data in 1st block
        length3 = 64
        offset3 = 0
        verify_data(dev, offset3, zero_string * length3)
        verify_data(dev, length2, zero_string * offset2)
        verify_data(dev, block_size, zero_string * length2)
        snap3 = cmd.snapshot_create(address)
        backup3 = create_backup(address, snap3, backup_target)
        backup3_url = backup3["URL"]
        assert backup3['IsIncremental'] is False

        # backup4: 256 random data in 1st block
        length4 = 256
        offset4 = 0
        snap4_data = random_string(length4)
        verify_data(dev, offset4, snap4_data)
        snap4 = cmd.snapshot_create(address)
        backup4 = create_backup(address, snap4, backup_target)
        backup4_url = backup4["URL"]
        assert backup4['IsIncremental'] is True

        rm_backups(address, ENGINE_NAME,
                   [backup0_url, backup1_url, backup3_url, backup4_url])

        cmd.sync_agent_server_reset(address)
        cleanup_replica(grpc_replica1)
        cleanup_replica(grpc_replica2)
        cleanup_controller(grpc_controller)