async def _alice_context():
    async with backend_authenticated_cmds_factory(
        alice.organization_addr, alice.device_id, alice.signing_key
    ) as cmds:
        rdm = RemoteDevicesManager(cmds, alice.root_verify_key)
        async with UserFS.run(
            alice, path, cmds, rdm, event_bus, get_prevent_sync_pattern()
        ) as user_fs:
            yield user_fs

async def logged_core_factory(
    config: CoreConfig, device: LocalDevice, event_bus: Optional[EventBus] = None
):
    event_bus = event_bus or EventBus()
    prevent_sync_pattern = get_prevent_sync_pattern(config.prevent_sync_pattern_path)
    backend_conn = BackendAuthenticatedConn(
        addr=device.organization_addr,
        device_id=device.device_id,
        signing_key=device.signing_key,
        event_bus=event_bus,
        max_cooldown=config.backend_max_cooldown,
        max_pool=config.backend_max_connections,
        keepalive=config.backend_connection_keepalive,
    )
    remote_devices_manager = RemoteDevicesManager(backend_conn.cmds, device.root_verify_key)
    async with UserFS.run(
        data_base_dir=config.data_base_dir,
        device=device,
        backend_cmds=backend_conn.cmds,
        remote_devices_manager=remote_devices_manager,
        event_bus=event_bus,
        prevent_sync_pattern=prevent_sync_pattern,
        preferred_language=config.gui_language,
        workspace_storage_cache_size=config.workspace_storage_cache_size,
    ) as user_fs:
        backend_conn.register_monitor(partial(monitor_messages, user_fs, event_bus))
        backend_conn.register_monitor(partial(monitor_sync, user_fs, event_bus))
        async with backend_conn.run():
            async with mountpoint_manager_factory(
                user_fs,
                event_bus,
                config.mountpoint_base_dir,
                mount_all=config.mountpoint_enabled,
                mount_on_workspace_created=config.mountpoint_enabled,
                mount_on_workspace_shared=config.mountpoint_enabled,
                unmount_on_workspace_revoked=config.mountpoint_enabled,
                exclude_from_mount_all=config.disabled_workspaces,
            ) as mountpoint_manager:
                yield LoggedCore(
                    config=config,
                    device=device,
                    event_bus=event_bus,
                    mountpoint_manager=mountpoint_manager,
                    user_fs=user_fs,
                    remote_devices_manager=remote_devices_manager,
                    backend_conn=backend_conn,
                )

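A minimal usage sketch for the factory above, assuming it is decorated with `contextlib.asynccontextmanager` (the decorator sits outside the excerpt) and driven by trio; `config` and `device` stand for already-loaded `CoreConfig` and `LocalDevice` objects:

import trio


async def main(config: CoreConfig, device: LocalDevice) -> None:
    # Entering the context brings up the backend connection, the user FS,
    # the monitors and the mountpoint manager; exiting tears them down.
    async with logged_core_factory(config, device) as core:
        # LoggedCore bundles user_fs, event_bus, mountpoint_manager and
        # backend_conn for the lifetime of the session (see the factory above)
        for entry in core.user_fs.get_user_manifest().workspaces:
            print(entry.id, entry.role)


# trio.run(main, config, device)  # with real CoreConfig / LocalDevice objects
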
async def _user_fs_factory(device, event_bus=None, initialize_in_v0: bool = False):
    event_bus = event_bus or event_bus_factory()
    async with backend_authenticated_cmds_factory(
        device.organization_addr, device.device_id, device.signing_key
    ) as cmds:
        path = local_storage_path(device)
        rdm = RemoteDevicesManager(cmds, device.root_verify_key)
        async with UserFS.run(device, path, cmds, rdm, event_bus) as user_fs:
            if not initialize_in_v0:
                await initialize_userfs_storage_v1(user_fs.storage)
            yield user_fs

async def _user_fs_factory(device, event_bus=None, data_base_dir=data_base_dir):
    event_bus = event_bus or event_bus_factory()
    async with backend_authenticated_cmds_factory(
        device.organization_addr, device.device_id, device.signing_key
    ) as cmds:
        rdm = RemoteDevicesManager(cmds, device.root_verify_key)
        async with UserFS.run(
            data_base_dir, device, cmds, rdm, event_bus, get_prevent_sync_pattern()
        ) as user_fs:
            yield user_fs

async def logged_core_factory(
    config: CoreConfig, device: LocalDevice, event_bus: Optional[EventBus] = None
):
    event_bus = event_bus or EventBus()
    backend_conn = BackendAuthenticatedConn(
        addr=device.organization_addr,
        device_id=device.device_id,
        signing_key=device.signing_key,
        event_bus=event_bus,
        max_cooldown=config.backend_max_cooldown,
        max_pool=config.backend_max_connections,
        keepalive=config.backend_connection_keepalive,
    )
    path = config.data_base_dir / device.slug
    remote_devices_manager = RemoteDevicesManager(backend_conn.cmds, device.root_verify_key)
    async with UserFS.run(
        device, path, backend_conn.cmds, remote_devices_manager, event_bus
    ) as user_fs:
        backend_conn.register_monitor(partial(monitor_messages, user_fs, event_bus))
        backend_conn.register_monitor(partial(monitor_sync, user_fs, event_bus))
        async with backend_conn.run():
            async with mountpoint_manager_factory(
                user_fs, event_bus, config.mountpoint_base_dir
            ) as mountpoint_manager:
                yield LoggedCore(
                    config=config,
                    device=device,
                    event_bus=event_bus,
                    remote_devices_manager=remote_devices_manager,
                    mountpoint_manager=mountpoint_manager,
                    backend_conn=backend_conn,
                    user_fs=user_fs,
                )

async def create_inconsistent_workspace(
    user_fs: UserFS, name=EntryName("w")
) -> WorkspaceFS:
    wid = await user_fs.workspace_create(name)
    workspace = user_fs.get_workspace(wid)
    await make_workspace_dir_inconsistent(workspace, FsPath("/rep"))
    return workspace

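A hedged example of how the helper above might be driven from a test, reusing the `_user_fs_factory` style shown earlier; the test name and the `alice` fixture are assumptions, and the exact exception type raised by syncing an inconsistent workspace is not shown in the excerpt:

import pytest


async def test_inconsistent_workspace(alice):
    async with _user_fs_factory(alice) as user_fs:
        workspace = await create_inconsistent_workspace(user_fs)
        # "/rep" now contains an inconsistent entry, so a full sync is
        # expected to fail (a real test would assert a specific FS error)
        with pytest.raises(Exception):
            await workspace.sync()
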
async def monitor_sync(user_fs: UserFS, event_bus: EventBus, task_status):
    ctxs = SyncContextStore(user_fs)
    early_wakeup = trio.Event()

    def _trigger_early_wakeup():
        early_wakeup.set()
        # Don't wait for the *actual* awakening to change the status, to
        # avoid having a period of time when the awakening is scheduled but
        # not yet notified to task_status
        task_status.awake()

    def _on_entry_updated(event, id, workspace_id=None):
        if workspace_id is None:
            # User manifest
            assert id == user_fs.user_manifest_id
            ctx = ctxs.get(id)
        else:
            ctx = ctxs.get(workspace_id)
        if ctx and ctx.set_local_change(id):
            _trigger_early_wakeup()

    def _on_realm_vlobs_updated(sender, realm_id, checkpoint, src_id, src_version):
        ctx = ctxs.get(realm_id)
        if ctx and ctx.set_remote_change(src_id):
            _trigger_early_wakeup()

    def _on_sharing_updated(sender, new_entry, previous_entry):
        # If the role has changed we have to reset the sync context, given its
        # behavior could have changed a lot (e.g. switching to/from read-only)
        ctxs.discard(new_entry.id)
        if new_entry.role is not None:
            ctx = ctxs.get(new_entry.id)
            if ctx:
                # Change the due_time so the context understands the early
                # wakeup is for it
                ctx.due_time = current_time()
                _trigger_early_wakeup()

    def _on_entry_confined(event, entry_id, cause_id, workspace_id):
        ctx = ctxs.get(workspace_id)
        if ctx is not None:
            ctx.set_confined_entry(entry_id, cause_id)

    async def _ctx_action(ctx, meth):
        try:
            return await getattr(ctx, meth)()
        except BackendNotAvailable:
            raise
        except Exception:
            logger.exception("Sync monitor has crashed", workspace_id=ctx.id)
            # Reset the sync context, which is now in an undefined state
            ctxs.discard(ctx.id)
            ctx = ctxs.get(ctx.id)
            if ctx:
                # Add a small cooldown just to be sure we don't end up in a
                # crazy busy error loop
                ctx.due_time = current_time() + TICK_CRASH_COOLDOWN
                return ctx.due_time
            else:
                return math.inf

    with event_bus.connect_in_context(
        (CoreEvent.FS_ENTRY_UPDATED, _on_entry_updated),
        (CoreEvent.BACKEND_REALM_VLOBS_UPDATED, _on_realm_vlobs_updated),
        (CoreEvent.SHARING_UPDATED, _on_sharing_updated),
        (CoreEvent.FS_ENTRY_CONFINED, _on_entry_confined),
    ):
        due_times = []
        # Init userfs sync context
        ctx = ctxs.get(user_fs.user_manifest_id)
        due_times.append(await _ctx_action(ctx, "bootstrap"))
        # Init workspaces sync contexts
        user_manifest = user_fs.get_user_manifest()
        for entry in user_manifest.workspaces:
            if entry.role is not None:
                ctx = ctxs.get(entry.id)
                if ctx:
                    due_times.append(await _ctx_action(ctx, "bootstrap"))

        task_status.started()
        while True:
            next_due_time = min(due_times)
            if next_due_time == math.inf:
                task_status.idle()
            with trio.move_on_at(next_due_time) as cancel_scope:
                await early_wakeup.wait()
                early_wakeup = trio.Event()
            # In case of early wakeup, `_trigger_early_wakeup` is responsible
            # for calling `task_status.awake()`
            if cancel_scope.cancelled_caught:
                task_status.awake()

            due_times.clear()
            await freeze_sync_monitor_mockpoint()
            for ctx in ctxs.iter():
                due_times.append(await _ctx_action(ctx, "tick"))

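For context, `monitor_sync` is not started directly: as both `logged_core_factory` variants above show, it is registered as a monitor on the backend connection, which launches it once the connection runs and supplies `task_status` (apparently a project-specific wrapper with `awake`/`idle`, not trio's built-in `TaskStatus`). The relevant lines, lifted from the factories above:

from functools import partial

backend_conn.register_monitor(partial(monitor_messages, user_fs, event_bus))
backend_conn.register_monitor(partial(monitor_sync, user_fs, event_bus))
async with backend_conn.run():
    # Registered monitors run for as long as the connection context stays open
    ...
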
def __init__(self, user_fs: UserFS, id: EntryID):
    self.workspace = user_fs.get_workspace(id)
    read_only = self.workspace.get_workspace_entry().role == WorkspaceRole.READER
    super().__init__(user_fs, id, read_only=read_only)
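This constructor presumably belongs to the workspace-level sync context instantiated by `SyncContextStore` in `monitor_sync` above. Note how it interacts with `_on_sharing_updated`: when a workspace's role changes, the context is discarded and lazily rebuilt, so `read_only` is always re-derived from the fresh workspace entry. A minimal restatement of the check, with `wid` a hypothetical workspace id:

entry = user_fs.get_workspace(wid).get_workspace_entry()
read_only = entry.role == WorkspaceRole.READER  # a READER may pull but never push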