async def _gui_factory(event_bus=None, core_config=core_config, start_arg=None):
    # The first-start popup blocks the test.
    # Version check and mountpoint are useless for most tests.
    core_config = core_config.evolve(
        gui_check_version_at_startup=False,
        gui_first_launch=False,
        gui_last_version=parsec_version,
        mountpoint_enabled=False,
        gui_language="en",
    )
    event_bus = event_bus or EventBus()
    # Language config relies on a global var, so it must be reset for each test!
    switch_language(core_config)

    def _create_main_window():
        # Pass minimize_on_close to avoid having the test blocked by the
        # closing confirmation prompt
        switch_language(core_config, "en")
        main_w = MainWindow(
            qt_thread_gateway._job_scheduler, event_bus, core_config, minimize_on_close=True
        )
        qtbot.add_widget(main_w)
        main_w.showMaximized()
        main_w.show_top()
        windows.append(main_w)
        main_w.add_instance(start_arg)
        return main_w

    return await qt_thread_gateway.send_action(_create_main_window)
async def backend_app_factory(config: BackendConfig, event_bus: Optional[EventBus] = None):
    event_bus = event_bus or EventBus()

    if config.db_url == "MOCKED":
        components_factory = mocked_components_factory
    else:
        components_factory = postgresql_components_factory

    async with components_factory(config=config, event_bus=event_bus) as components:
        yield BackendApp(
            config=config,
            event_bus=event_bus,
            webhooks=components["webhooks"],
            http=components["http"],
            user=components["user"],
            invite=components["invite"],
            organization=components["organization"],
            message=components["message"],
            realm=components["realm"],
            vlob=components["vlob"],
            ping=components["ping"],
            blockstore=components["blockstore"],
            block=components["block"],
            events=components["events"],
        )
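# Illustrative sketch (not from the Parsec source): backend_app_factory picks a
# components factory from configuration, then yields the assembled app from an
# async context manager. The `_MockedStore`/`_PostgresStore` stand-ins below
# are hypothetical, used only to model that selection pattern in isolation.
from contextlib import asynccontextmanager


class _MockedStore:
    # In-memory stand-in for the mocked components
    def close(self) -> None:
        pass


class _PostgresStore(_MockedStore):
    # Database-backed stand-in for the PostgreSQL components
    pass


@asynccontextmanager
async def _app_factory_sketch(db_url: str):
    # Same selection logic as backend_app_factory: the "MOCKED" sentinel
    # switches the whole components factory, anything else hits the real DB
    store = _MockedStore() if db_url == "MOCKED" else _PostgresStore()
    try:
        yield {"store": store}
    finally:
        store.close()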
async def logged_core_factory(
    config: CoreConfig, device: LocalDevice, event_bus: Optional[EventBus] = None
):
    event_bus = event_bus or EventBus()
    prevent_sync_pattern = get_prevent_sync_pattern(config.prevent_sync_pattern_path)

    backend_conn = BackendAuthenticatedConn(
        addr=device.organization_addr,
        device_id=device.device_id,
        signing_key=device.signing_key,
        event_bus=event_bus,
        max_cooldown=config.backend_max_cooldown,
        max_pool=config.backend_max_connections,
        keepalive=config.backend_connection_keepalive,
    )

    remote_devices_manager = RemoteDevicesManager(backend_conn.cmds, device.root_verify_key)
    async with UserFS.run(
        data_base_dir=config.data_base_dir,
        device=device,
        backend_cmds=backend_conn.cmds,
        remote_devices_manager=remote_devices_manager,
        event_bus=event_bus,
        prevent_sync_pattern=prevent_sync_pattern,
        preferred_language=config.gui_language,
        workspace_storage_cache_size=config.workspace_storage_cache_size,
    ) as user_fs:

        backend_conn.register_monitor(partial(monitor_messages, user_fs, event_bus))
        backend_conn.register_monitor(partial(monitor_sync, user_fs, event_bus))

        async with backend_conn.run():

            async with mountpoint_manager_factory(
                user_fs,
                event_bus,
                config.mountpoint_base_dir,
                mount_all=config.mountpoint_enabled,
                mount_on_workspace_created=config.mountpoint_enabled,
                mount_on_workspace_shared=config.mountpoint_enabled,
                unmount_on_workspace_revoked=config.mountpoint_enabled,
                exclude_from_mount_all=config.disabled_workspaces,
            ) as mountpoint_manager:

                yield LoggedCore(
                    config=config,
                    device=device,
                    event_bus=event_bus,
                    mountpoint_manager=mountpoint_manager,
                    user_fs=user_fs,
                    remote_devices_manager=remote_devices_manager,
                    backend_conn=backend_conn,
                )
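# Illustrative sketch (not from the Parsec source): logged_core_factory layers
# async context managers so teardown runs in reverse order of startup (the
# mountpoint manager stops before the backend connection drops, which drops
# before the user fs closes). A runnable toy model of that layering, with
# hypothetical component names:
from contextlib import asynccontextmanager

import trio


@asynccontextmanager
async def _component(name: str):
    print(f"start {name}")
    try:
        yield name
    finally:
        print(f"stop {name}")


async def _layering_demo():
    async with _component("user_fs"):
        async with _component("backend_conn"):
            async with _component("mountpoint_manager"):
                print("core is running")
    # Teardown prints in reverse order: mountpoint_manager, backend_conn, user_fs


if __name__ == "__main__":
    trio.run(_layering_demo)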
async def _gui_factory(event_bus=None, core_config=core_config, start_arg=None):
    # The first-start popup blocks the test.
    # Version check and mountpoint are useless for most tests.
    core_config = core_config.evolve(
        gui_check_version_at_startup=False,
        gui_first_launch=False,
        gui_last_version=parsec_version,
        mountpoint_enabled=True,
        gui_language="en",
    )
    event_bus = event_bus or EventBus()
    # Language config relies on a global var, so it must be reset for each test!
    switch_language(core_config)

    def _create_main_window():
        # Pass minimize_on_close to avoid having the test blocked by the
        # closing confirmation prompt
        switch_language(core_config, "en")
        monkeypatch.setattr(
            "parsec.core.gui.main_window.list_available_devices",
            lambda *args, **kwargs: ["a"],
        )
        main_w = MainWindow(
            qt_thread_gateway._job_scheduler, event_bus, core_config, minimize_on_close=True
        )
        qtbot.add_widget(main_w)
        main_w.showMaximized()
        main_w.show_top()
        windows.append(main_w)
        main_w.add_instance(start_arg)

        def right_main_window():
            assert ParsecApp.get_main_window() is main_w

        # For some reason, the main window from the previous test might
        # still be around. Simply wait for things to settle down until
        # our freshly created window is detected as the app main window.
        qtbot.wait_until(right_main_window)

        return main_w

    return await qt_thread_gateway.send_action(_create_main_window)
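# Illustrative sketch (not from the Parsec source): `qtbot.wait_until`
# (pytest-qt) repeatedly calls the checker until it stops raising (or returns
# True) or the timeout expires, which is how the fixture above waits for the
# freshly created window to become the app main window. A self-contained
# example with a plain QLabel:
from PyQt5.QtWidgets import QLabel


def test_wait_until_condition(qtbot):
    label = QLabel()
    qtbot.add_widget(label)
    label.setText("ready")
    # Passes as soon as the checker returns True
    qtbot.wait_until(lambda: label.text() == "ready")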
async def logged_core_factory(
    config: CoreConfig, device: LocalDevice, event_bus: Optional[EventBus] = None
):
    event_bus = event_bus or EventBus()

    backend_conn = BackendAuthenticatedConn(
        addr=device.organization_addr,
        device_id=device.device_id,
        signing_key=device.signing_key,
        event_bus=event_bus,
        max_cooldown=config.backend_max_cooldown,
        max_pool=config.backend_max_connections,
        keepalive=config.backend_connection_keepalive,
    )

    path = config.data_base_dir / device.slug
    remote_devices_manager = RemoteDevicesManager(backend_conn.cmds, device.root_verify_key)
    async with UserFS.run(
        device, path, backend_conn.cmds, remote_devices_manager, event_bus
    ) as user_fs:

        backend_conn.register_monitor(partial(monitor_messages, user_fs, event_bus))
        backend_conn.register_monitor(partial(monitor_sync, user_fs, event_bus))

        async with backend_conn.run():

            async with mountpoint_manager_factory(
                user_fs, event_bus, config.mountpoint_base_dir
            ) as mountpoint_manager:

                yield LoggedCore(
                    config=config,
                    device=device,
                    event_bus=event_bus,
                    remote_devices_manager=remote_devices_manager,
                    mountpoint_manager=mountpoint_manager,
                    backend_conn=backend_conn,
                    user_fs=user_fs,
                )
def run_gui(config: CoreConfig, start_arg: str = None, diagnose: bool = False):
    logger.info("Starting UI")

    # Needed for high-DPI usage of QIcons, otherwise only QImages are well scaled
    QApplication.setAttribute(Qt.AA_EnableHighDpiScaling)
    QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps)

    app = QApplication(["-stylesheet"])
    app.setOrganizationName("Scille")
    app.setOrganizationDomain("parsec.cloud")
    app.setApplicationName("Parsec")

    QFontDatabase.addApplicationFont(":/fonts/fonts/Roboto-Regular.ttf")
    f = QFont("Roboto")
    app.setFont(f)

    rc = QFile(":/styles/styles/main.css")
    rc.open(QFile.ReadOnly)
    content = rc.readAll().data()
    app.setStyleSheet(str(content, "utf-8"))

    lang_key = lang.switch_language(config)

    event_bus = EventBus()
    with run_trio_thread() as jobs_ctx:
        win = MainWindow(
            jobs_ctx=jobs_ctx,
            event_bus=event_bus,
            config=config,
            minimize_on_close=config.gui_tray_enabled and systray_available(),
        )

        result_queue = Queue(maxsize=1)

        class ThreadSafeNoQtSignal(ThreadSafeQtSignal):
            def __init__(self):
                self.qobj = None
                self.signal_name = ""
                self.args_types = ()

            def emit(self, *args):
                pass

        jobs_ctx.submit_job(
            ThreadSafeNoQtSignal(),
            ThreadSafeNoQtSignal(),
            _start_ipc_server,
            config,
            win,
            start_arg,
            result_queue,
        )
        if result_queue.get() == "already_running":
            # Another instance of Parsec is already started, nothing more to do
            return

        if systray_available():
            systray = Systray(parent=win)
            win.systray_notification.connect(systray.on_systray_notification)
            systray.on_close.connect(win.close_app)
            systray.on_show.connect(win.show_top)
            app.aboutToQuit.connect(before_quit(systray))
            if config.gui_tray_enabled:
                app.setQuitOnLastWindowClosed(False)

        if config.gui_check_version_at_startup and not diagnose:
            CheckNewVersion(jobs_ctx=jobs_ctx, event_bus=event_bus, config=config, parent=win)

        win.showMaximized(skip_dialogs=diagnose)
        win.show_top()
        win.new_instance_needed.emit(start_arg)

        def kill_window(*args):
            win.close_app(force=True)
            QApplication.quit()

        signal.signal(signal.SIGINT, kill_window)

        # QTimer wakes up the event loop periodically, which allows us to close
        # the window even when it is in the background.
        timer = QTimer()
        timer.start(1000 if diagnose else 400)
        timer.timeout.connect(kill_window if diagnose else lambda: None)

        if diagnose:
            diagnose_timer = QTimer()
            diagnose_timer.start(1000)
            diagnose_timer.timeout.connect(kill_window)

        if lang_key:
            event_bus.send("gui.config.changed", gui_language=lang_key)

        if diagnose:
            with fail_on_first_exception(kill_window):
                return app.exec_()
        else:
            return app.exec_()
async def winfsp_mountpoint_runner(
    user_fs: UserFS,
    workspace_fs: WorkspaceFS,
    base_mountpoint_path: PurePath,
    config: dict,
    event_bus: EventBus,
):
    """
    Raises:
        MountpointDriverCrash
    """
    device = workspace_fs.device
    workspace_name = winify_entry_name(workspace_fs.get_workspace_name())
    trio_token = trio.lowlevel.current_trio_token()
    fs_access = ThreadFSAccess(trio_token, workspace_fs, event_bus)

    user_manifest = user_fs.get_user_manifest()
    workspace_ids = [entry.id for entry in user_manifest.workspaces]
    workspace_index = workspace_ids.index(workspace_fs.workspace_id)
    # `base_mountpoint_path` is ignored given we only mount from a drive
    mountpoint_path = await _get_available_drive(workspace_index, len(workspace_ids))

    # Prepare event information
    event_kwargs = {
        "mountpoint": mountpoint_path,
        "workspace_id": workspace_fs.workspace_id,
        "timestamp": getattr(workspace_fs, "timestamp", None),
    }

    if config.get("debug", False):
        enable_debug_log()

    # The volume label is limited to 32 WCHAR characters, so force the label to
    # ASCII to easily enforce the size.
    volume_label = (
        unicodedata.normalize("NFKD", f"{workspace_name.capitalize()}")
        .encode("ascii", "ignore")[:32]
        .decode("ascii")
    )
    volume_serial_number = _generate_volume_serial_number(device, workspace_fs.workspace_id)
    operations = WinFSPOperations(fs_access=fs_access, volume_label=volume_label, **event_kwargs)
    # See https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-getvolumeinformationa  # noqa
    fs = FileSystem(
        mountpoint_path.drive,
        operations,
        sector_size=512,
        sectors_per_allocation_unit=1,
        volume_creation_time=filetime_now(),
        volume_serial_number=volume_serial_number,
        file_info_timeout=1000,
        case_sensitive_search=1,
        case_preserved_names=1,
        unicode_on_disk=1,
        persistent_acls=0,
        reparse_points=0,
        reparse_points_access_check=0,
        named_streams=0,
        read_only_volume=workspace_fs.is_read_only(),
        post_cleanup_when_modified_only=1,
        device_control=0,
        um_file_context_is_user_context2=1,
        file_system_name="Parsec",
        prefix="",
        # The minimum value for IRP timeout is 1 minute (default is 5)
        irp_timeout=60000,
        # security_timeout_valid=1,
        # security_timeout=10000,
    )
    try:
        event_bus.send(CoreEvent.MOUNTPOINT_STARTING, **event_kwargs)

        # Manage the drive icon
        drive_letter, *_ = mountpoint_path.drive
        with parsec_drive_icon_context(drive_letter):

            # Run fs start in a thread
            await trio.to_thread.run_sync(fs.start)

            # The system is too sensitive right after starting
            await trio.sleep(0.010)  # 10 ms

            # Make sure the mountpoint is ready
            await _wait_for_winfsp_ready(mountpoint_path)

            # Notify the manager that the mountpoint is ready
            yield mountpoint_path

            # Start recording `sharing.updated` events
            with event_bus.waiter_on(CoreEvent.SHARING_UPDATED) as waiter:

                # Loop over `sharing.updated` events
                while True:

                    # Restart the mountpoint with the right read_only flag if necessary.
                    # Don't bother with restarting if the workspace has been revoked:
                    # it's the manager's responsibility to unmount the workspace in this case.
                    if (
                        workspace_fs.is_read_only() != fs.volume_params["read_only_volume"]
                        and not workspace_fs.is_revoked()
                    ):
                        restart = partial(fs.restart, read_only_volume=workspace_fs.is_read_only())
                        await trio.to_thread.run_sync(restart)

                    # Wait and reset the waiter
                    await waiter.wait()
                    waiter.clear()

    except Exception as exc:
        raise MountpointDriverCrash(f"WinFSP has crashed on {mountpoint_path}: {exc}") from exc

    finally:
        event_bus.send(CoreEvent.MOUNTPOINT_STOPPING, **event_kwargs)

        # Must run in a thread, given this call will wait for any WinFSP operation
        # to finish, so blocking the trio loop could deadlock...
        with trio.CancelScope(shield=True):
            try:
                await trio.to_thread.run_sync(fs.stop)
            # The file system might not be started,
            # if the task gets cancelled before running `fs.start` for instance
            except FileSystemNotStarted:
                pass
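# Illustrative sketch (not from the Parsec source): the `finally` block above
# shields its cleanup in a trio.CancelScope so a cancelled mountpoint task
# still stops WinFSP cleanly instead of abandoning the blocking `fs.stop`
# call. The pattern in isolation:
import trio


async def _guarded_teardown_demo():
    try:
        await trio.sleep_forever()
    finally:
        with trio.CancelScope(shield=True):
            # Still runs to completion even though the task was cancelled
            await trio.to_thread.run_sync(lambda: None)  # stand-in for fs.stop


async def _shield_main():
    async with trio.open_nursery() as nursery:
        nursery.start_soon(_guarded_teardown_demo)
        await trio.sleep(0.1)
        nursery.cancel_scope.cancel()


if __name__ == "__main__":
    trio.run(_shield_main)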
def run_gui(config: CoreConfig, start_arg: str = None, diagnose: bool = False):
    logger.info("Starting UI")

    # Needed for high-DPI usage of QIcons, otherwise only QImages are well scaled
    QApplication.setAttribute(Qt.AA_EnableHighDpiScaling)
    QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps)
    QApplication.setHighDpiScaleFactorRoundingPolicy(
        Qt.HighDpiScaleFactorRoundingPolicy.PassThrough
    )

    app = ParsecApp()

    app.load_stylesheet()
    app.load_font()

    lang_key = lang.switch_language(config)

    event_bus = EventBus()
    with run_trio_thread() as jobs_ctx:
        win = MainWindow(
            jobs_ctx=jobs_ctx,
            event_bus=event_bus,
            config=config,
            minimize_on_close=config.gui_tray_enabled and systray_available(),
        )

        result_queue = Queue(maxsize=1)

        class ThreadSafeNoQtSignal(ThreadSafeQtSignal):
            def __init__(self):
                self.qobj = None
                self.signal_name = ""
                self.args_types = ()

            def emit(self, *args):
                pass

        jobs_ctx.submit_job(
            ThreadSafeNoQtSignal(),
            ThreadSafeNoQtSignal(),
            _start_ipc_server,
            config,
            win,
            start_arg,
            result_queue,
        )
        if result_queue.get() == "already_running":
            # Another instance of Parsec is already started, nothing more to do
            return

        if systray_available():
            systray = Systray(parent=win)
            win.systray_notification.connect(systray.on_systray_notification)
            systray.on_close.connect(win.close_app)
            systray.on_show.connect(win.show_top)
            app.aboutToQuit.connect(before_quit(systray))
            if config.gui_tray_enabled:
                app.setQuitOnLastWindowClosed(False)

        if config.gui_check_version_at_startup and not diagnose:
            CheckNewVersion(jobs_ctx=jobs_ctx, event_bus=event_bus, config=config, parent=win)

        win.show_window(skip_dialogs=diagnose, invitation_link=start_arg)
        win.show_top()
        win.new_instance_needed.emit(start_arg)

        def kill_window(*args):
            win.close_app(force=True)
            QApplication.quit()

        signal.signal(signal.SIGINT, kill_window)

        # QTimer wakes up the event loop periodically, which allows us to close
        # the window even when it is in the background.
        timer = QTimer()
        timer.start(1000 if diagnose else 400)
        timer.timeout.connect(kill_window if diagnose else lambda: None)

        if diagnose:
            diagnose_timer = QTimer()
            diagnose_timer.start(1000)
            diagnose_timer.timeout.connect(kill_window)

        if lang_key:
            event_bus.send(CoreEvent.GUI_CONFIG_CHANGED, gui_language=lang_key)

        if diagnose:
            with fail_on_first_exception(kill_window):
                return app.exec_()
        else:
            with log_pyqt_exceptions():
                return app.exec_()
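# Illustrative sketch (not from the Parsec source): the QTimer above is a
# classic Qt workaround. Python signal handlers only run while the interpreter
# executes Python bytecode, so a periodic no-op timeout forces the Qt event
# loop back into Python, letting SIGINT (Ctrl+C) be processed. Standalone
# PyQt5 version:
import signal
import sys

from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)
signal.signal(signal.SIGINT, lambda *args: app.quit())
timer = QTimer()
timer.start(400)                     # wake the event loop every 400 ms
timer.timeout.connect(lambda: None)  # no-op: just re-enter Python code
sys.exit(app.exec_())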
async def fuse_mountpoint_runner(
    user_fs: UserFS,
    workspace_fs: WorkspaceFS,
    base_mountpoint_path: PurePath,
    config: dict,
    event_bus: EventBus,
):
    """
    Raises:
        MountpointDriverCrash
    """
    fuse_thread_started = threading.Event()
    fuse_thread_stopped = threading.Event()
    trio_token = trio.lowlevel.current_trio_token()
    fs_access = ThreadFSAccess(trio_token, workspace_fs, event_bus)

    mountpoint_path, initial_st_dev = await _bootstrap_mountpoint(
        base_mountpoint_path, workspace_fs
    )

    # Prepare event information
    event_kwargs = {
        "mountpoint": mountpoint_path,
        "workspace_id": workspace_fs.workspace_id,
        "timestamp": getattr(workspace_fs, "timestamp", None),
    }

    fuse_operations = FuseOperations(fs_access, **event_kwargs)

    try:
        teardown_cancel_scope = None
        event_bus.send(CoreEvent.MOUNTPOINT_STARTING, **event_kwargs)

        async with trio.open_service_nursery() as nursery:

            # Let fusepy decode the paths using the current file system encoding.
            # Note that this does not prevent the user from using a certain encoding
            # in the context of the parsec app and another encoding in the context of
            # an application accessing the mountpoint. In this case, an encoding error
            # might be raised while fusepy tries to decode the path. If that happens,
            # fusepy will log the error and simply return EINVAL, which is acceptable.
            encoding = sys.getfilesystemencoding()

            def _run_fuse_thread():
                with importlib_resources.files(resources_module).joinpath(
                    "parsec.icns"
                ) as parsec_icns_path:

                    fuse_platform_options = {}
                    if sys.platform == "darwin":
                        fuse_platform_options = {
                            "local": True,
                            "defer_permissions": True,
                            "volname": workspace_fs.get_workspace_name(),
                            "volicon": str(parsec_icns_path.absolute()),
                        }
                        # osxfuse-specific options:
                        # local: allows the mountpoint to show up correctly in Finder (+ desktop)
                        # volname: specify the volume name (default is OSXFUSE [...])
                        # volicon: specify the volume icon (default is the macOS drive icon)

                        # On defer_permissions: "The defer_permissions option [...] causes macFUSE
                        # to assume that all accesses are allowed; it will forward all operations
                        # to the file system, and it is up to somebody else to eventually allow or
                        # deny the operations." See
                        # https://github.com/osxfuse/osxfuse/wiki/Mount-options#default_permissions-and-defer_permissions
                        # This option is necessary on macOS to give the right permissions to files
                        # inside FUSE drives, otherwise it impedes saving and auto-saving from
                        # Apple software (TextEdit, Preview...) due to the gid of files seemingly
                        # not having write rights from the software's perspective.
                        # One other solution found for this issue was to change the gid of the
                        # mountpoint and its files from staff (default) to wheel (admin gid).
                        # Using defer_permissions allows us to ignore the gid issue entirely and
                        # lets Parsec itself handle read/write rights inside workspaces.

                    else:
                        fuse_platform_options = {"auto_unmount": True}

                    logger.info("Starting fuse thread...", mountpoint=mountpoint_path)
                    try:
                        # Do not let fuse start if the runner is stopping.
                        # It's important that `fuse_thread_started` is set before the check
                        # in order to avoid race conditions.
                        fuse_thread_started.set()
                        if teardown_cancel_scope is not None:
                            return
                        FUSE(
                            fuse_operations,
                            str(mountpoint_path.absolute()),
                            foreground=True,
                            encoding=encoding,
                            **fuse_platform_options,
                            **config,
                        )

                    except Exception as exc:
                        try:
                            errcode = errno.errorcode[exc.args[0]]
                        except (KeyError, IndexError):
                            errcode = f"Unknown error code: {exc}"
                        raise MountpointDriverCrash(
                            f"Fuse has crashed on {mountpoint_path}: {errcode}"
                        ) from exc

                    finally:
                        fuse_thread_stopped.set()

            # We're about to call the `fuse_main_real` function from libfuse, so let's make sure
            # the signals are correctly patched before that (`_patch_signals` is idempotent)
            _patch_signals()

            nursery.start_soon(
                lambda: trio.to_thread.run_sync(_run_fuse_thread, cancellable=True)
            )
            await _wait_for_fuse_ready(mountpoint_path, fuse_thread_started, initial_st_dev)

            # Indicate the mountpoint is now started
            yield mountpoint_path

    finally:
        event_bus.send(CoreEvent.MOUNTPOINT_STOPPING, **event_kwargs)
        with trio.CancelScope(shield=True) as teardown_cancel_scope:
            await _stop_fuse_thread(
                mountpoint_path, fuse_operations, fuse_thread_started, fuse_thread_stopped
            )
            await _teardown_mountpoint(mountpoint_path)
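# Illustrative sketch (not from the Parsec source): the fuse runner hands a
# blocking C loop (FUSE) to a worker thread and coordinates with trio through
# threading.Event handshakes, mirroring `fuse_thread_started` /
# `fuse_thread_stopped` above. A reduced runnable model:
import threading

import trio


def _blocking_loop(started: threading.Event, stop: threading.Event):
    started.set()         # tell the async side the loop is live
    stop.wait(timeout=1)  # stand-in for the blocking fuse_main_real call


async def _handshake_demo():
    started, stop = threading.Event(), threading.Event()
    async with trio.open_nursery() as nursery:
        nursery.start_soon(lambda: trio.to_thread.run_sync(_blocking_loop, started, stop))
        # Wait for the thread without blocking the trio loop
        await trio.to_thread.run_sync(started.wait)
        stop.set()


if __name__ == "__main__":
    trio.run(_handshake_demo)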
def _handle_event(event_bus: EventBus, rep: dict) -> None:
    if rep["status"] != "ok":
        logger.warning("Bad response to `events_listen` command", rep=rep)
        return

    if rep["event"] == "message.received":
        event_bus.send("backend.message.received", index=rep["index"])

    elif rep["event"] == "pinged":
        event_bus.send("backend.pinged", ping=rep["ping"])

    elif rep["event"] == "realm.roles_updated":
        realm_id = EntryID(rep["realm_id"])
        event_bus.send("backend.realm.roles_updated", realm_id=realm_id, role=rep["role"])

    elif rep["event"] == "realm.vlobs_updated":
        src_id = EntryID(rep["src_id"])
        realm_id = EntryID(rep["realm_id"])
        event_bus.send(
            "backend.realm.vlobs_updated",
            realm_id=realm_id,
            checkpoint=rep["checkpoint"],
            src_id=src_id,
            src_version=rep["src_version"],
        )

    elif rep["event"] == "realm.maintenance_started":
        event_bus.send(
            "backend.realm.maintenance_started",
            realm_id=rep["realm_id"],
            encryption_revision=rep["encryption_revision"],
        )

    elif rep["event"] == "realm.maintenance_finished":
        event_bus.send(
            "backend.realm.maintenance_finished",
            realm_id=rep["realm_id"],
            encryption_revision=rep["encryption_revision"],
        )
def _handle_event(event_bus: EventBus, rep: dict) -> None:
    if rep["status"] != "ok":
        logger.warning("Bad response to `events_listen` command", rep=rep)
        return

    if rep["event"] == APIEvent.MESSAGE_RECEIVED:
        event_bus.send(CoreEvent.BACKEND_MESSAGE_RECEIVED, index=rep["index"])

    elif rep["event"] == APIEvent.PINGED:
        event_bus.send(CoreEvent.BACKEND_PINGED, ping=rep["ping"])

    elif rep["event"] == APIEvent.REALM_ROLES_UPDATED:
        realm_id = EntryID(rep["realm_id"])
        event_bus.send(CoreEvent.BACKEND_REALM_ROLES_UPDATED, realm_id=realm_id, role=rep["role"])

    elif rep["event"] == APIEvent.REALM_VLOBS_UPDATED:
        src_id = EntryID(rep["src_id"])
        realm_id = EntryID(rep["realm_id"])
        event_bus.send(
            CoreEvent.BACKEND_REALM_VLOBS_UPDATED,
            realm_id=realm_id,
            checkpoint=rep["checkpoint"],
            src_id=src_id,
            src_version=rep["src_version"],
        )

    elif rep["event"] == APIEvent.REALM_MAINTENANCE_STARTED:
        event_bus.send(
            CoreEvent.BACKEND_REALM_MAINTENANCE_STARTED,
            realm_id=rep["realm_id"],
            encryption_revision=rep["encryption_revision"],
        )

    elif rep["event"] == APIEvent.REALM_MAINTENANCE_FINISHED:
        event_bus.send(
            CoreEvent.BACKEND_REALM_MAINTENANCE_FINISHED,
            realm_id=rep["realm_id"],
            encryption_revision=rep["encryption_revision"],
        )
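# Illustrative sketch (not from the Parsec source): the if/elif chain above is
# essentially a translation table from API events to core events. A
# hypothetical table-driven equivalent, using plain strings so it stays
# self-contained:
import logging
from typing import Any, Callable, Dict, Tuple

logger = logging.getLogger(__name__)

# Maps incoming event name -> (outgoing event name, payload keys to forward)
_TRANSLATION: Dict[str, Tuple[str, Tuple[str, ...]]] = {
    "pinged": ("backend.pinged", ("ping",)),
    "message.received": ("backend.message.received", ("index",)),
}


def _handle_event_sketch(send: Callable[..., None], rep: Dict[str, Any]) -> None:
    if rep.get("status") != "ok":
        logger.warning("Bad response to `events_listen` command: %r", rep)
        return
    try:
        out_event, keys = _TRANSLATION[rep["event"]]
    except KeyError:
        return  # unknown event: ignore
    send(out_event, **{key: rep[key] for key in keys})


# Example:
# _handle_event_sketch(lambda event, **kw: print(event, kw),
#                      {"status": "ok", "event": "pinged", "ping": "x"})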
def __init__(self, config, event_bus=None):
    self.event_bus = event_bus or EventBus()
    self.config = config
    self.nursery = None
    self.dbh = None
    self.events = EventsComponent(self.event_bus)

    if self.config.db_url == "MOCKED":
        self.user = MemoryUserComponent(self.event_bus)
        self.organization = MemoryOrganizationComponent(self.user)
        self.message = MemoryMessageComponent(self.event_bus)
        self.beacon = MemoryBeaconComponent(self.event_bus)
        self.vlob = MemoryVlobComponent(self.event_bus, self.beacon)
        self.ping = MemoryPingComponent(self.event_bus)
        self.blockstore = blockstore_factory(self.config.blockstore_config)
    else:
        self.dbh = PGHandler(self.config.db_url, self.event_bus)
        self.user = PGUserComponent(self.dbh, self.event_bus)
        self.organization = PGOrganizationComponent(self.dbh, self.user)
        self.message = PGMessageComponent(self.dbh)
        self.beacon = PGBeaconComponent(self.dbh)
        self.vlob = PGVlobComponent(self.dbh, self.beacon)
        self.ping = PGPingComponent(self.dbh)
        self.blockstore = blockstore_factory(
            self.config.blockstore_config, postgresql_dbh=self.dbh
        )

    self.logged_cmds = {
        "events_subscribe": self.events.api_events_subscribe,
        "events_listen": self.events.api_events_listen,
        "ping": self.ping.api_ping,
        "beacon_read": self.beacon.api_beacon_read,
        # Message
        "message_get": self.message.api_message_get,
        "message_send": self.message.api_message_send,
        # User&Device
        "user_get": self.user.api_user_get,
        "user_find": self.user.api_user_find,
        "user_invite": self.user.api_user_invite,
        "user_cancel_invitation": self.user.api_user_cancel_invitation,
        "user_create": self.user.api_user_create,
        "device_invite": self.user.api_device_invite,
        "device_cancel_invitation": self.user.api_device_cancel_invitation,
        "device_create": self.user.api_device_create,
        "device_revoke": self.user.api_device_revoke,
        # Blockstore
        "blockstore_create": self.blockstore.api_blockstore_create,
        "blockstore_read": self.blockstore.api_blockstore_read,
        # Vlob
        "vlob_group_check": self.vlob.api_vlob_group_check,
        "vlob_create": self.vlob.api_vlob_create,
        "vlob_read": self.vlob.api_vlob_read,
        "vlob_update": self.vlob.api_vlob_update,
    }
    self.anonymous_cmds = {
        "user_claim": self.user.api_user_claim,
        "user_get_invitation_creator": self.user.api_user_get_invitation_creator,
        "device_claim": self.user.api_device_claim,
        "device_get_invitation_creator": self.user.api_device_get_invitation_creator,
        "organization_bootstrap": self.organization.api_organization_bootstrap,
        "ping": self.ping.api_ping,
    }
    self.administration_cmds = {
        "organization_create": self.organization.api_organization_create,
        "ping": self.ping.api_ping,
    }
    for fn in self.anonymous_cmds.values():
        check_anonymous_api_allowed(fn)
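# Illustrative sketch (not from the Parsec source): the *_cmds dicts above form
# a simple command router mapping API command names to bound methods. A minimal
# async dispatcher over such a table (the `Handler` shape is this sketch's own
# assumption):
from typing import Any, Awaitable, Callable, Dict

Handler = Callable[[Dict[str, Any]], Awaitable[Dict[str, Any]]]


async def _dispatch_sketch(cmds: Dict[str, Handler], req: Dict[str, Any]) -> Dict[str, Any]:
    # Unknown commands get a structured error instead of a KeyError
    handler = cmds.get(req.get("cmd"))
    if handler is None:
        return {"status": "unknown_command"}
    return await handler(req)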
async def _run_gui(
    app: ParsecApp, config: CoreConfig, start_arg: str = None, diagnose: bool = False
):
    app.load_stylesheet()
    app.load_font()

    lang_key = lang.switch_language(config)

    event_bus = EventBus()
    async with run_trio_job_scheduler() as jobs_ctx:
        win = MainWindow(
            jobs_ctx=jobs_ctx,
            quit_callback=jobs_ctx.close,
            event_bus=event_bus,
            config=config,
            minimize_on_close=config.gui_tray_enabled and systray_available(),
        )

        # Attempt to run an IPC server if Parsec is not already started
        try:
            await jobs_ctx.nursery.start(_run_ipc_server, config, win, start_arg)
        # Another instance of Parsec is already started, nothing more to do
        except IPCServerAlreadyRunning:
            return

        # If we are here, either the IPC server has successfully started or it
        # has crashed without being able to communicate with an existing IPC
        # server. Such a case is of course not supposed to happen, but if it
        # does we nevertheless keep the application running as a kind of
        # failsafe mode (and the crash reason is logged and sent to telemetry).

        # The systray is not displayed on macOS, which natively has a menu with
        # similar functions.
        if systray_available() and sys.platform != "darwin":
            systray = Systray(parent=win)
            win.systray_notification.connect(systray.on_systray_notification)
            systray.on_close.connect(win.close_app)
            systray.on_show.connect(win.show_top)
            app.aboutToQuit.connect(before_quit(systray))
            if config.gui_tray_enabled:
                app.setQuitOnLastWindowClosed(False)

        if config.gui_check_version_at_startup and not diagnose:
            CheckNewVersion(jobs_ctx=jobs_ctx, event_bus=event_bus, config=config, parent=win)

        win.show_window(skip_dialogs=diagnose)
        win.show_top()
        win.new_instance_needed.emit(start_arg)

        if sys.platform == "darwin":
            # macFUSE is not bundled with Parsec and must be manually installed
            # by the user, so we detect early whether it is missing in order to
            # provide a help dialogue ;-)
            # TODO: provide a similar mechanism on Windows&Linux to handle an
            # unavailable mountpoint runner
            from parsec.core.gui.instance_widget import ensure_macfuse_available_or_show_dialogue

            ensure_macfuse_available_or_show_dialogue(win)

        def kill_window(*args):
            win.close_app(force=True)

        signal.signal(signal.SIGINT, kill_window)

        # QTimer wakes up the event loop periodically, which allows us to close
        # the window even when it is in the background.
        timer = QTimer()
        timer.start(400)
        timer.timeout.connect(lambda: None)

        if diagnose:
            diagnose_timer = QTimer()
            diagnose_timer.start(1000)
            diagnose_timer.timeout.connect(kill_window)

        if lang_key:
            event_bus.send(CoreEvent.GUI_CONFIG_CHANGED, gui_language=lang_key)

        with QDialogInProcess.manage_pools():
            if diagnose:
                with fail_on_first_exception(kill_window):
                    await trio.sleep_forever()
            else:
                with log_pyqt_exceptions():
                    await trio.sleep_forever()
async def logged_core_factory(
    config: CoreConfig,
    device: LocalDevice,
    event_bus: Optional[EventBus] = None,
    mountpoint: Optional[Path] = None,
):
    if config.mountpoint_enabled and os.name == "nt":
        logger.warning("Mountpoint disabled (not supported yet on Windows)")
        config = config.evolve(mountpoint_enabled=False)

    event_bus = event_bus or EventBus()

    # Plenty of nested scopes to order components' init/teardown
    async with trio.open_nursery() as root_nursery:
        # TODO: Currently backend_listen_events connects to the backend and
        # switches to listen-events mode, then the monitors kick in and send it
        # events about which beacons to listen on, forcing a restart of the
        # listen connection...
        backend_online = await root_nursery.start(backend_listen_events, device, event_bus)

        async with backend_cmds_factory(
            device.organization_addr,
            device.device_id,
            device.signing_key,
            config.backend_max_connections,
        ) as backend_cmds_pool:

            local_db = LocalDB(config.data_base_dir / device.device_id)

            encryption_manager = EncryptionManager(device, local_db, backend_cmds_pool)
            fs = FS(device, local_db, backend_cmds_pool, encryption_manager, event_bus)

            async with trio.open_nursery() as monitor_nursery:
                # Finally start the monitors.
                # The connection monitor must come first given it will watch
                # the other monitors' events.
                await monitor_nursery.start(monitor_backend_connection, backend_online, event_bus)
                await monitor_nursery.start(monitor_beacons, device, fs, event_bus)
                await monitor_nursery.start(monitor_messages, backend_online, fs, event_bus)
                await monitor_nursery.start(monitor_sync, backend_online, fs, event_bus)

                # TODO: rework the mountpoint manager to avoid init/teardown
                mountpoint_manager = mountpoint_manager_factory(fs, event_bus)
                await mountpoint_manager.init(monitor_nursery)
                if config.mountpoint_enabled:
                    if not mountpoint:
                        mountpoint = config.mountpoint_base_dir / device.device_id
                    await mountpoint_manager.start(mountpoint)

                try:
                    yield LoggedCore(
                        config=config,
                        device=device,
                        local_db=local_db,
                        event_bus=event_bus,
                        encryption_manager=encryption_manager,
                        mountpoint_manager=mountpoint_manager,
                        backend_cmds=backend_cmds_pool,
                        fs=fs,
                    )
                    root_nursery.cancel_scope.cancel()

                finally:
                    if config.mountpoint_enabled:
                        await mountpoint_manager.teardown()
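# Illustrative sketch (not from the Parsec source): the monitors above are
# launched with `await nursery.start(...)`, which blocks until the child task
# calls `task_status.started()`, guaranteeing the ordered startup the comment
# describes. Minimal trio illustration:
import trio


async def _monitor(name: str, *, task_status=trio.TASK_STATUS_IGNORED):
    print(f"{name}: initialising")
    task_status.started()  # unblocks the `await nursery.start(...)` caller
    await trio.sleep_forever()


async def _ordered_startup_demo():
    async with trio.open_nursery() as nursery:
        await nursery.start(_monitor, "connection-monitor")
        await nursery.start(_monitor, "sync-monitor")  # starts strictly after
        nursery.cancel_scope.cancel()


if __name__ == "__main__":
    trio.run(_ordered_startup_demo)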
async def monitor_sync(user_fs: UserFS, event_bus: EventBus, task_status):
    ctxs = SyncContextStore(user_fs)
    early_wakeup = trio.Event()

    def _trigger_early_wakeup():
        early_wakeup.set()
        # Don't wait for the *actual* awakening to change the status, to
        # avoid having a period of time when the awakening is scheduled but
        # not yet notified to task_status
        task_status.awake()

    def _on_entry_updated(event, id, workspace_id=None):
        if workspace_id is None:
            # User manifest
            assert id == user_fs.user_manifest_id
            ctx = ctxs.get(id)
        else:
            ctx = ctxs.get(workspace_id)
        if ctx and ctx.set_local_change(id):
            _trigger_early_wakeup()

    def _on_realm_vlobs_updated(sender, realm_id, checkpoint, src_id, src_version):
        ctx = ctxs.get(realm_id)
        if ctx and ctx.set_remote_change(src_id):
            _trigger_early_wakeup()

    def _on_sharing_updated(sender, new_entry, previous_entry):
        # If the role has changed, we have to reset the sync context given the
        # behavior could have changed a lot (e.g. switching to/from read-only)
        ctxs.discard(new_entry.id)
        if new_entry.role is not None:
            ctx = ctxs.get(new_entry.id)
            if ctx:
                # Change the due_time so the context understands the early
                # wakeup is meant for it
                ctx.due_time = current_time()
                _trigger_early_wakeup()

    def _on_entry_confined(event, entry_id, cause_id, workspace_id):
        ctx = ctxs.get(workspace_id)
        if ctx is not None:
            ctx.set_confined_entry(entry_id, cause_id)

    async def _ctx_action(ctx, meth):
        try:
            return await getattr(ctx, meth)()
        except BackendNotAvailable:
            raise
        except Exception:
            logger.exception("Sync monitor has crashed", workspace_id=ctx.id)
            # Reset the sync context, which is now in an undefined state
            ctxs.discard(ctx.id)
            ctx = ctxs.get(ctx.id)
            if ctx:
                # Add a small cooldown, just to be sure not to end up in a
                # crazy busy error loop
                ctx.due_time = current_time() + TICK_CRASH_COOLDOWN
                return ctx.due_time
            else:
                return math.inf

    with event_bus.connect_in_context(
        (CoreEvent.FS_ENTRY_UPDATED, _on_entry_updated),
        (CoreEvent.BACKEND_REALM_VLOBS_UPDATED, _on_realm_vlobs_updated),
        (CoreEvent.SHARING_UPDATED, _on_sharing_updated),
        (CoreEvent.FS_ENTRY_CONFINED, _on_entry_confined),
    ):
        due_times = []
        # Init the userfs sync context
        ctx = ctxs.get(user_fs.user_manifest_id)
        due_times.append(await _ctx_action(ctx, "bootstrap"))
        # Init the workspaces sync contexts
        user_manifest = user_fs.get_user_manifest()
        for entry in user_manifest.workspaces:
            if entry.role is not None:
                ctx = ctxs.get(entry.id)
                if ctx:
                    due_times.append(await _ctx_action(ctx, "bootstrap"))

        task_status.started()
        while True:
            next_due_time = min(due_times)
            if next_due_time == math.inf:
                task_status.idle()
            with trio.move_on_at(next_due_time) as cancel_scope:
                await early_wakeup.wait()
                early_wakeup = trio.Event()
            # In case of early wakeup, `_trigger_early_wakeup` is responsible
            # for calling `task_status.awake()`
            if cancel_scope.cancelled_caught:
                task_status.awake()

            due_times.clear()
            await freeze_sync_monitor_mockpoint()
            for ctx in ctxs.iter():
                due_times.append(await _ctx_action(ctx, "tick"))
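# Illustrative sketch (not from the Parsec source): monitor_sync sleeps until
# the earliest `due_time` but can be woken early by events; recreating the
# trio.Event after each wakeup re-arms the one-shot latch. The scheduling
# core, reduced to a runnable toy:
import math

import trio


async def _scheduler_demo():
    early_wakeup = trio.Event()
    for tick in range(3):
        due_times = [trio.current_time() + 0.1]  # pretend work is due in 100 ms
        next_due_time = min(due_times, default=math.inf)
        with trio.move_on_at(next_due_time) as cancel_scope:
            await early_wakeup.wait()
            early_wakeup = trio.Event()  # one-shot: re-arm for the next round
        if cancel_scope.cancelled_caught:
            print(f"tick {tick}: woke up because a deadline expired")
        else:
            print(f"tick {tick}: woken early by an event")


if __name__ == "__main__":
    trio.run(_scheduler_demo)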