async def test_autosync_placeholder_user_manifest(
    frozen_clock,
    running_backend,
    backend_data_binder,
    event_bus_factory,
    core_config,
    coolorg,
    alice,
    alice2,
):
    """Placeholder user manifest must get auto-synced by the sync monitor,
    both when the realm/vlob are absent from the server and when they
    already exist there."""
    # Case 1: neither realm nor vlob have been created server-side yet
    await backend_data_binder.bind_organization(
        coolorg, alice, initial_user_manifest="not_synced"
    )
    # We deliberately avoid the `core_factory` fixture: it waits for the
    # monitors to be idle before handing back the core, which would hide
    # the very synchronization we want to observe.
    async with logged_core_factory(
        core_config, alice, event_bus=event_bus_factory()
    ) as first_core:
        # Give the sync monitor time to pick up and sync the placeholder
        with first_core.event_bus.listen() as event_spy:
            await frozen_clock.sleep_with_autojump(60)
            await event_spy.wait_with_timeout(
                CoreEvent.FS_ENTRY_SYNCED, {"id": alice.user_manifest_id}
            )

    # Case 2: realm and vlob already exist server-side
    await backend_data_binder.bind_device(alice2)
    async with logged_core_factory(
        core_config, alice2, event_bus=event_bus_factory()
    ) as second_core:
        with second_core.event_bus.listen() as event_spy:
            # Give the sync monitor time to detect the remote change
            await frozen_clock.sleep_with_autojump(60)
            await event_spy.wait_with_timeout(
                CoreEvent.FS_ENTRY_REMOTE_CHANGED,
                {"id": alice2.user_manifest_id, "path": "/"},
            )
async def test_autosync_placeholder_workspace_manifest(
    frozen_clock,
    running_backend,
    backend_data_binder,
    event_bus_factory,
    core_config,
    coolorg,
    alice,
    alice2,
):
    """A freshly created workspace must get auto-synced, whether the user
    manifest itself is still a placeholder or already synchronized."""
    # Case 1: workspace created while the user manifest is still a placeholder
    await backend_data_binder.bind_organization(
        coolorg, alice, initial_user_manifest="not_synced"
    )
    # We deliberately avoid the `core_factory` fixture: it waits for the
    # monitors to be idle before handing back the core, which would hide
    # the very synchronization we want to observe.
    async with logged_core_factory(
        core_config, alice, event_bus=event_bus_factory()
    ) as first_core:
        with first_core.event_bus.listen() as event_spy:
            w1id = await first_core.user_fs.workspace_create(EntryName("w1"))
            # Give the sync monitor time to sync both manifests
            await frozen_clock.sleep_with_autojump(60)
            expected = [
                (CoreEvent.FS_ENTRY_SYNCED, {"id": alice.user_manifest_id}),
                (CoreEvent.FS_ENTRY_SYNCED, {"workspace_id": w1id, "id": w1id}),
            ]
            await event_spy.wait_multiple_with_timeout(expected, in_order=False)

    # Case 2: workspace created on an already-synced user manifest
    await backend_data_binder.bind_device(alice2)
    async with logged_core_factory(
        core_config, alice2, event_bus=event_bus_factory()
    ) as second_core:
        with second_core.event_bus.listen() as event_spy:
            w2id = await second_core.user_fs.workspace_create(EntryName("w2"))
            await frozen_clock.sleep_with_autojump(60)
            expected = [
                (CoreEvent.FS_ENTRY_SYNCED, {"id": alice2.user_manifest_id}),
                (CoreEvent.FS_ENTRY_SYNCED, {"workspace_id": w2id, "id": w2id}),
            ]
            await event_spy.wait_multiple_with_timeout(expected, in_order=False)
async def _core_factory(device, event_bus=None, user_manifest_in_v0=False):
    """Yield a logged core for *device*, waiting until the backend is up
    and (if a running backend fixture is active) the connection is READY.

    Unless ``user_manifest_in_v0`` is set, the user manifest storage is
    pre-initialized to v1 so the core doesn't start from a placeholder.
    """
    await running_backend_ready.wait()
    if not event_bus:
        event_bus = event_bus_factory()

    if not user_manifest_in_v0:
        # A throwaway storage is enough for this initialization step: the
        # underlying database is shared with the core's own storage thanks
        # to `persistent_mockup`.
        storage_path = core_config.data_base_dir / device.slug
        async with UserStorage.run(device=device, path=storage_path) as storage:
            await initialize_userfs_storage_v1(storage)

    with event_bus.listen() as spy:
        async with logged_core_factory(core_config, device, event_bus) as core:
            # A freshly started core always starts in offline mode; wait for
            # it to reach the backend to avoid races with the test body when
            # a backend is actually running.
            if "running_backend" in request.fixturenames:
                await spy.wait_with_timeout(
                    CoreEvent.BACKEND_CONNECTION_CHANGED,
                    {"status": BackendConnStatus.READY, "status_exc": spy.ANY},
                )
            assert core.are_monitors_idle()
            yield core
async def main():
    """Log into the device identified by DEVICE_ID/PASSWORD and run a
    workspace-corruption scenario against its first workspace."""
    # Load configuration and locate the target device's key file
    config_dir = get_default_config_dir(os.environ)
    config = load_config(config_dir)
    available = list_available_devices(config_dir)
    key_file = next(
        kf for _, device_id, _, kf in available if device_id == DEVICE_ID
    )
    device = load_device_with_password(key_file, PASSWORD)

    # Log in and grab the first workspace of the user manifest
    async with logged_core_factory(config, device) as core:
        manifest = core.user_fs.get_user_manifest()
        entry = manifest.workspaces[0]
        workspace = core.user_fs.get_workspace(entry.id)

        # Alternative scenario, kept for manual toggling:
        # await make_workspace_dir_inconsistent(device, workspace, "/bar")
        await make_workspace_dir_simple_versions(device, workspace, "/foo")
async def _core_factory(device, event_bus=None):
    """Yield a logged core for *device*, waiting (with a real-clock bound)
    for the backend to be ready and, when a running backend fixture is
    active, for the connection to reach READY."""
    # Bound the wait with a real-clock timeout so a broken fixture that
    # never starts the backend can't freeze the whole test.
    async with real_clock_timeout():
        await running_backend_ready()

    if not event_bus:
        event_bus = event_bus_factory()

    with event_bus.listen() as spy:
        async with logged_core_factory(core_config, device, event_bus) as core:
            # A freshly started core always starts in offline mode; wait for
            # it to reach the backend to avoid races with the test body when
            # a backend is actually running.
            if "running_backend" in request.fixturenames:
                await spy.wait_with_timeout(
                    CoreEvent.BACKEND_CONNECTION_CHANGED,
                    {"status": BackendConnStatus.READY, "status_exc": spy.ANY},
                )
            assert core.are_monitors_idle()
            yield core