async def _fetch_remote_user_manifest(self, version: Optional[int] = None) -> UserManifest:
    """Fetch a version of the user manifest from the backend and validate it.

    Args:
        version: version to fetch, or None to fetch the latest one.
        (Fix: was annotated `int = None`, which is an invalid implicit
        Optional per PEP 484.)

    Raises:
        FSError
        FSWorkspaceInMaintenance
        FSBackendOfflineError
    """
    try:
        # Note encryption_revision is always 1 given we never reencrypt
        # the user manifest's realm
        rep = await self.backend_cmds.vlob_read(1, self.user_manifest_id, version)
    except BackendNotAvailable as exc:
        raise FSBackendOfflineError(str(exc)) from exc

    if rep["status"] == "in_maintenance":
        raise FSWorkspaceInMaintenance(
            "Cannot access workspace data while it is in maintenance"
        )
    elif rep["status"] != "ok":
        raise FSError(f"Cannot fetch user manifest from backend: {rep}")

    expected_author = rep["author"]
    expected_timestamp = rep["timestamp"]
    expected_version = rep["version"]
    blob = rep["blob"]

    # Fetch the author's device to obtain its verify key
    try:
        author = await self.remote_devices_manager.get_device(expected_author)
    except RemoteDevicesManagerBackendOfflineError as exc:
        raise FSBackendOfflineError(str(exc)) from exc
    except RemoteDevicesManagerError as exc:
        raise FSError(f"Cannot retrieve author public key: {exc}") from exc

    try:
        manifest = UserManifest.decrypt_verify_and_load(
            blob,
            key=self.device.user_manifest_key,
            author_verify_key=author.verify_key,
            expected_id=self.device.user_manifest_id,
            expected_author=expected_author,
            expected_timestamp=expected_timestamp,
            # When a specific version was requested, enforce it; otherwise
            # trust the version the backend reported
            expected_version=version if version is not None else expected_version,
        )
    except DataError as exc:
        raise FSError(f"Invalid user manifest: {exc}") from exc

    return manifest
async def _retreive_participants(self, workspace_id):
    """Return the non-revoked users currently having a role on the workspace.

    Raises:
        FSError
        FSBackendOfflineError
        FSWorkspaceNoAccess
    """
    # NOTE(review): the "retreive" typo in the method name is kept on purpose
    # (callers elsewhere use it); renaming would break them.

    # First retrieve workspace participants list
    roles = await self.remote_loader.load_realm_current_roles(workspace_id)

    # Then retrieve each participant user data, dropping revoked users
    participants = []
    try:
        for participant_id in roles:
            user, revoked = await self.remote_devices_manager.get_user(participant_id)
            if revoked:
                continue
            participants.append(user)
    except RemoteDevicesManagerBackendOfflineError as exc:
        raise FSBackendOfflineError(str(exc)) from exc
    except RemoteDevicesManagerError as exc:
        raise FSError(
            f"Cannot retrieve workspace {workspace_id} participants: {exc}"
        ) from exc

    return participants
def translate_backend_cmds_errors() -> Iterator[None]:
    """Translate backend connection errors raised in the wrapped section into FS errors.

    Generator-based context-manager body: `BackendNotAvailable` becomes
    `FSBackendOfflineError`, any other `BackendConnectionError` becomes `FSError`.
    NOTE(review): expected to be used as `with translate_backend_cmds_errors():`,
    which requires an @contextmanager decorator at the definition site — confirm.
    """
    try:
        yield
    except BackendNotAvailable as exc:
        raise FSBackendOfflineError(str(exc)) from exc
    except BackendConnectionError as exc:
        raise FSError(str(exc)) from exc
async def get_reencryption_need(self) -> ReencryptionNeed:
    """Compute what (if anything) makes a reencryption of this workspace needed.

    Returns:
        A ReencryptionNeed listing users/roles revoked since the last key
        rotation and whether a reencryption maintenance is already running.

    Raises:
        FSError
        FSBackendOfflineError
        FSWorkspaceNoAccess
    """
    wentry = self.get_workspace_entry()

    # A placeholder workspace has never been synced: nothing to reencrypt
    try:
        workspace_manifest = await self.local_storage.get_manifest(self.workspace_id)
        if workspace_manifest.is_placeholder:
            return ReencryptionNeed(
                user_revoked=(), role_revoked=(), reencryption_already_in_progress=False
            )
    except FSLocalMissError:
        pass

    try:
        rep = await self.backend_cmds.realm_status(self.workspace_id)
    except BackendNotAvailable as exc:
        raise FSBackendOfflineError(str(exc)) from exc
    except BackendConnectionError as exc:
        # Fix: "retreive" typo in the error message
        raise FSError(
            f"Cannot retrieve remote status for workspace {self.workspace_id}: {exc}"
        ) from exc
    # Fix: check the reply status before reading the maintenance fields —
    # a "not_allowed" reply would otherwise raise a bare KeyError below
    # (and the docstring already promises FSWorkspaceNoAccess)
    if rep["status"] == "not_allowed":
        raise FSWorkspaceNoAccess(
            f"Not allowed to access workspace {self.workspace_id}: {rep}"
        )
    elif rep["status"] != "ok":
        raise FSError(
            f"Error while getting status for workspace {self.workspace_id}: {rep}"
        )

    reencryption_already_in_progress = (
        rep["in_maintenance"] and rep["maintenance_type"] == MaintenanceType.REENCRYPTION
    )

    # Replay the role certificates to find who currently has a role and whose
    # role was removed after the last key rotation
    certificates = await self.remote_loader.load_realm_role_certificates()
    has_role = set()
    role_revoked = set()
    for certif in certificates:
        if certif.role is None:
            if certif.timestamp > wentry.encrypted_on:
                role_revoked.add(certif.user_id)
            has_role.discard(certif.user_id)
        else:
            role_revoked.discard(certif.user_id)
            has_role.add(certif.user_id)

    user_revoked = []
    for user_id in has_role:
        _, revoked_user = await self.remote_loader.get_user(user_id, no_cache=True)
        # Revocations that predate the last key rotation are already covered
        if revoked_user and revoked_user.timestamp > wentry.encrypted_on:
            user_revoked.append(user_id)

    return ReencryptionNeed(
        user_revoked=tuple(user_revoked),
        role_revoked=tuple(role_revoked),
        reencryption_already_in_progress=reencryption_already_in_progress,
    )
async def workspace_continue_reencryption(self, workspace_id: EntryID) -> ReencryptionJob:
    """Resume an in-progress reencryption maintenance on the given workspace.

    Raises:
        FSError
        FSBackendOfflineError
        FSWorkspaceNoAccess
        FSWorkspaceNotFoundError
    """
    user_manifest = self.get_user_manifest()
    workspace_entry = user_manifest.get_workspace_entry(workspace_id)
    if not workspace_entry:
        raise FSWorkspaceNotFoundError(f"Unknown workspace `{workspace_id}`")

    # First make sure the workspace is under maintenance
    try:
        rep = await self.backend_cmds.realm_status(workspace_entry.id)
    except BackendNotAvailable as exc:
        raise FSBackendOfflineError(str(exc)) from exc
    except BackendConnectionError as exc:
        raise FSError(
            f"Cannot continue maintenance on workspace {workspace_id}: {exc}"
        ) from exc

    if rep["status"] == "not_allowed":
        raise FSWorkspaceNoAccess(f"Not allowed to access workspace {workspace_id}: {rep}")
    elif rep["status"] != "ok":
        raise FSError(f"Error while getting status for workspace {workspace_id}: {rep}")
    if not rep["in_maintenance"] or rep["maintenance_type"] != MaintenanceType.REENCRYPTION:
        raise FSWorkspaceNotInMaintenance("Not in reencryption maintenance")

    current_encryption_revision = rep["encryption_revision"]
    if rep["encryption_revision"] != workspace_entry.encryption_revision:
        raise FSError("Bad encryption revision")

    # Must retrieve the previous encryption revision's key: walk the user
    # manifest history backward until the entry for revision - 1 shows up
    version_to_fetch = None
    while True:
        previous_user_manifest = await self._fetch_remote_user_manifest(
            version=version_to_fetch
        )
        previous_workspace_entry = previous_user_manifest.get_workspace_entry(
            workspace_entry.id
        )
        if not previous_workspace_entry:
            raise FSError(
                f"Never had access to encryption revision {current_encryption_revision - 1}"
            )
        if previous_workspace_entry.encryption_revision == current_encryption_revision - 1:
            break
        version_to_fetch = previous_user_manifest.version - 1

    return ReencryptionJob(self.backend_cmds, workspace_entry, previous_workspace_entry)
async def process_last_messages(self) -> Sequence[Tuple[int, Exception]]:
    """Fetch and process every backend message not yet handled.

    Returns:
        The (count, exception) pairs for messages that failed to process.

    Raises:
        FSError
        FSBackendOfflineError
        FSSharingNotAllowedError
    """
    failures = []
    # Concurrent message processing is totally pointless
    async with self._process_messages_lock:
        user_manifest = self.get_user_manifest()
        initial_last_processed_message = user_manifest.last_processed_message

        try:
            rep = await self.backend_cmds.message_get(offset=initial_last_processed_message)
        except BackendNotAvailable as exc:
            raise FSBackendOfflineError(str(exc)) from exc
        except BackendConnectionError as exc:
            raise FSError(f"Cannot retrieve user messages: {exc}") from exc
        if rep["status"] != "ok":
            raise FSError(f"Cannot retrieve user messages: {rep}")

        # Process each message in order, collecting per-message failures
        # without aborting the whole batch (except when the backend goes away)
        new_last_processed_message = initial_last_processed_message
        for message in rep["messages"]:
            try:
                await self._process_message(
                    message["sender"], message["timestamp"], message["body"]
                )
                new_last_processed_message = message["count"]
            except FSBackendOfflineError:
                raise
            except FSError as exc:
                logger.warning(
                    "Invalid message",
                    reason=exc,
                    sender=message["sender"],
                    count=message["count"],
                )
                failures.append((message["count"], exc))

        # Update message offset in user manifest
        async with self._update_user_manifest_lock:
            user_manifest = self.get_user_manifest()
            if user_manifest.last_processed_message < new_last_processed_message:
                user_manifest = user_manifest.evolve_and_mark_updated(
                    last_processed_message=new_last_processed_message,
                    timestamp=self.device.timestamp(),
                )
                await self.set_user_manifest(user_manifest)
                self.event_bus.send(CoreEvent.FS_ENTRY_UPDATED, id=self.user_manifest_id)

    return failures
async def _backend_cmds(self, cmd, *args, **kwargs):
    """Invoke the named backend command, translating connection errors to FS errors.

    Raises:
        FSError
        FSBackendOfflineError
    """
    method = getattr(self.backend_cmds, cmd)
    try:
        return await method(*args, **kwargs)
    except BackendNotAvailable as exc:
        raise FSBackendOfflineError(str(exc)) from exc
    except BackendConnectionError as exc:
        raise FSError(
            f"`{cmd}` request has failed due to connection error `{exc}`"
        ) from exc
def translate_remote_devices_manager_errors() -> Iterator[None]:
    """Map RemoteDevicesManager exceptions raised in the wrapped section onto FS ones.

    Generator-based context-manager body; the catch-all RemoteDevicesManagerError
    clause must stay last so the specific subclasses are matched first.
    """
    try:
        yield
    except RemoteDevicesManagerBackendOfflineError as exc:
        raise FSBackendOfflineError(str(exc)) from exc
    except RemoteDevicesManagerUserNotFoundError as exc:
        raise FSUserNotFoundError(str(exc)) from exc
    except RemoteDevicesManagerDeviceNotFoundError as exc:
        raise FSDeviceNotFoundError(str(exc)) from exc
    except RemoteDevicesManagerInvalidTrustchainError as exc:
        # NOTE(review): "FSInvalidTrustchainEror" spelling matches the class
        # as defined elsewhere in the project — do not "fix" it locally.
        raise FSInvalidTrustchainEror(str(exc)) from exc
    except RemoteDevicesManagerError as exc:
        raise FSRemoteOperationError(str(exc)) from exc
async def get_reencryption_need(self) -> ReencryptionNeed:
    """Compute which revoked users/roles make a reencryption of this workspace needed.

    Raises:
        FSError
        FSBackendOfflineError
        FSWorkspaceNoAccess
    """
    wentry = self.get_workspace_entry()

    # A placeholder workspace has never been synced: nothing to reencrypt
    try:
        workspace_manifest = await self.local_storage.get_manifest(self.workspace_id)
        if workspace_manifest.is_placeholder:
            return ReencryptionNeed()
    except FSLocalMissError:
        pass

    # Replay the role certificates to find who currently has a role and whose
    # role was removed after the last key rotation
    certificates = await self.remote_loader.load_realm_role_certificates()
    has_role = set()
    role_revoked = set()
    for certif in certificates:
        if certif.role is None:
            if certif.timestamp > wentry.encrypted_on:
                role_revoked.add(certif.user_id)
            has_role.discard(certif.user_id)
        else:
            role_revoked.discard(certif.user_id)
            has_role.add(certif.user_id)

    user_revoked = []
    try:
        for user_id in has_role:
            # NOTE(review): attribute is `remote_device_manager` (singular)
            # here while sibling code uses `remote_devices_manager` — confirm
            # which attribute this class actually defines.
            _, revoked_user = await self.remote_device_manager.get_user(
                user_id, no_cache=True
            )
            # Revocations that predate the last key rotation are already covered
            if revoked_user and revoked_user.timestamp > wentry.encrypted_on:
                user_revoked.append(user_id)
    except RemoteDevicesManagerBackendOfflineError as exc:
        raise FSBackendOfflineError(str(exc)) from exc
    except RemoteDevicesManagerError as exc:
        raise FSError(
            f"Cannot retrieve workspace participant {user_id}: {exc}"
        ) from exc

    return ReencryptionNeed(
        user_revoked=tuple(user_revoked), role_revoked=tuple(role_revoked)
    )
async def _process_message(
    self, sender_id: DeviceID, expected_timestamp: Pendulum, ciphered: bytes
):
    """Decrypt, verify and dispatch a single incoming backend message.

    Raises:
        FSError
        FSBackendOfflineError
        FSSharingNotAllowedError
    """
    # Retrieve the sender's device to obtain its verify key
    try:
        sender = await self.remote_devices_manager.get_device(sender_id)
    except RemoteDevicesManagerBackendOfflineError as exc:
        raise FSBackendOfflineError(str(exc)) from exc
    except RemoteDevicesManagerError as exc:
        raise FSError(f"Cannot retrieve message sender `{sender_id}`: {exc}") from exc

    # Decrypt&verify message
    try:
        msg = MessageContent.decrypt_verify_and_load_for(
            ciphered,
            recipient_privkey=self.device.private_key,
            author_verify_key=sender.verify_key,
            expected_author=sender_id,
            expected_timestamp=expected_timestamp,
        )
    except DataError as exc:
        raise FSError(f"Cannot decrypt&validate message from `{sender_id}`: {exc}") from exc

    # Dispatch according to the message type; unknown types are ignored
    if isinstance(msg, (SharingGrantedMessageContent, SharingReencryptedMessageContent)):
        await self._process_message_sharing_granted(msg)
    elif isinstance(msg, SharingRevokedMessageContent):
        await self._process_message_sharing_revoked(msg)
    elif isinstance(msg, PingMessageContent):
        self.event_bus.send("pinged", ping=msg.ping)
async def _vlob_update(
    self,
    encryption_revision: int,
    entry_id: EntryID,
    ciphered: bytes,
    now: DateTime,
    version: int,
) -> None:
    """Upload a new version of an existing vlob to the backend.

    Raises:
        FSError
        FSRemoteSyncError
        FSBackendOfflineError
        FSWorkspaceInMaintenance
        FSBadEncryptionRevision
        FSWorkspaceNoAccess
    """
    # Vlob upload
    with translate_backend_cmds_errors():
        rep = await self.backend_cmds.vlob_update(
            encryption_revision, entry_id, version, now, ciphered
        )

    status = rep["status"]
    if status == "ok":
        return
    if status in ("not_found", "bad_version"):
        raise FSRemoteSyncError(entry_id)
    if status == "not_allowed":
        # Seems we lost the access to the realm
        raise FSWorkspaceNoWriteAccess("Cannot upload manifest: no write access")
    if status == "bad_timestamp":
        # Quick and dirty fix before a better version with a retry loop : go offline so we
        # don't have to deal with another client updating manifest with a later timestamp
        raise FSBackendOfflineError(rep)
    if status == "bad_encryption_revision":
        raise FSBadEncryptionRevision(
            f"Cannot update vlob {entry_id}: Bad encryption revision provided"
        )
    if status == "in_maintenance":
        raise FSWorkspaceInMaintenance(
            "Cannot create vlob while the workspace is in maintenance"
        )
    raise FSError(f"Cannot update vlob {entry_id}: `{rep['status']}`")
async def _send_start_reencryption_cmd(
    self,
    workspace_id: EntryID,
    encryption_revision: int,
    timestamp: DateTime,
    per_user_ciphered_msgs: Dict[UserID, bytes],
) -> bool:
    """Ask the backend to start a reencryption maintenance on the workspace.

    Returns:
        False when the backend reports a participants mismatch (the caller
        recomputes the participant list and retries), True otherwise.

    Raises:
        FSError
        FSBackendOfflineError
        FSWorkspaceNoAccess
        BackendCmdsParticipantsMismatchError
    """
    # Finally send command to the backend
    try:
        rep = await self.backend_cmds.realm_start_reencryption_maintenance(
            RealmID(workspace_id.uuid),
            encryption_revision,
            timestamp,
            per_user_ciphered_msgs,
        )
    except BackendNotAvailable as exc:
        raise FSBackendOfflineError(str(exc)) from exc
    except BackendConnectionError as exc:
        raise FSError(
            f"Cannot start maintenance on workspace {workspace_id}: {exc}"
        ) from exc

    status = rep["status"]
    if status == "participants_mismatch":
        # Caught by the caller
        return False
    if status == "in_maintenance":
        raise FSWorkspaceInMaintenance(
            f"Workspace {workspace_id} already in maintenance: {rep}"
        )
    if status == "not_allowed":
        raise FSWorkspaceNoAccess(
            f"Not allowed to start maintenance on workspace {workspace_id}: {rep}"
        )
    if status != "ok":
        raise FSError(f"Cannot start maintenance on workspace {workspace_id}: {rep}")
    return True
async def do_one_batch(self, size=100) -> Tuple[int, int]:
    """Reencrypt one batch of vlobs, finishing the maintenance once all are done.

    Args:
        size: maximum number of vlobs to reencrypt in this batch.

    Returns:
        (total, done) vlob counts for the reencryption job.

    Raises:
        FSError
        FSBackendOfflineError
        FSWorkspaceInMaintenance
        FSWorkspaceNoAccess
    """
    workspace_id = self.new_workspace_entry.id
    new_encryption_revision = self.new_workspace_entry.encryption_revision

    def _check_rep(rep):
        # The exact same status triage applied to every maintenance command
        # (was duplicated three times in the original body)
        if rep["status"] in ("not_in_maintenance", "bad_encryption_revision"):
            raise FSWorkspaceNotInMaintenance(f"Reencryption job already finished: {rep}")
        elif rep["status"] == "not_allowed":
            raise FSWorkspaceNoAccess(
                f"Not allowed to do reencryption maintenance on workspace {workspace_id}: {rep}"
            )
        elif rep["status"] != "ok":
            raise FSError(
                f"Cannot do reencryption maintenance on workspace {workspace_id}: {rep}"
            )

    try:
        # Get the batch
        rep = await self.backend_cmds.vlob_maintenance_get_reencryption_batch(
            workspace_id, new_encryption_revision, size
        )
        _check_rep(rep)

        # Decrypt each vlob with the old key, reencrypt it with the new one
        donebatch = []
        for item in rep["batch"]:
            cleartext = self.old_workspace_entry.key.decrypt(item["blob"])
            newciphered = self.new_workspace_entry.key.encrypt(cleartext)
            donebatch.append((item["vlob_id"], item["version"], newciphered))

        # Upload the reencrypted batch
        rep = await self.backend_cmds.vlob_maintenance_save_reencryption_batch(
            workspace_id, new_encryption_revision, donebatch
        )
        _check_rep(rep)

        total = rep["total"]
        done = rep["done"]

        if total == done:
            # Finish the maintenance
            rep = await self.backend_cmds.realm_finish_reencryption_maintenance(
                workspace_id, new_encryption_revision
            )
            _check_rep(rep)

    except BackendNotAvailable as exc:
        raise FSBackendOfflineError(str(exc)) from exc
    except BackendConnectionError as exc:
        raise FSError(
            f"Cannot do reencryption maintenance on workspace {workspace_id}: {exc}"
        ) from exc

    return total, done
async def workspace_share(
    self, workspace_id: EntryID, recipient: UserID, role: Optional[WorkspaceRole]
) -> None:
    """Share the workspace with `recipient`, or unshare it when `role` is None.

    Raises:
        FSError
        FSWorkspaceNotFoundError
        FSBackendOfflineError
        FSSharingNotAllowedError
    """
    if self.device.user_id == recipient:
        raise FSError("Cannot share to oneself")

    user_manifest = self.get_user_manifest()
    workspace_entry = user_manifest.get_workspace_entry(workspace_id)
    if not workspace_entry:
        raise FSWorkspaceNotFoundError(f"Unknown workspace `{workspace_id}`")

    # Make sure the workspace is not a placeholder
    await self._workspace_minimal_sync(workspace_entry)

    # Retrieve the recipient user
    try:
        recipient_user, revoked_recipient_user = await self.remote_devices_manager.get_user(
            recipient
        )
    except RemoteDevicesManagerBackendOfflineError as exc:
        raise FSBackendOfflineError(str(exc)) from exc
    except RemoteDevicesManagerError as exc:
        # Fix: "retreive" typo in the error message
        raise FSError(f"Cannot retrieve recipient: {exc}") from exc
    if revoked_recipient_user:
        raise FSError(f"User {recipient} revoked")

    # Note we don't bother to check workspace's access roles given they
    # could be outdated (and backend will do the check anyway)
    now = pendulum_now()

    # Build the sharing message
    try:
        if role is not None:
            recipient_message = SharingGrantedMessageContent(
                author=self.device.device_id,
                timestamp=now,
                name=workspace_entry.name,
                id=workspace_entry.id,
                encryption_revision=workspace_entry.encryption_revision,
                encrypted_on=workspace_entry.encrypted_on,
                key=workspace_entry.key,
            )
        else:
            recipient_message = SharingRevokedMessageContent(
                author=self.device.device_id, timestamp=now, id=workspace_entry.id
            )
        ciphered_recipient_message = recipient_message.dump_sign_and_encrypt_for(
            author_signkey=self.device.signing_key,
            recipient_pubkey=recipient_user.public_key,
        )
    except DataError as exc:
        raise FSError(f"Cannot create sharing message for `{recipient}`: {exc}") from exc

    # Build role certificate
    role_certificate = RealmRoleCertificateContent(
        author=self.device.device_id,
        timestamp=now,
        realm_id=workspace_id,
        user_id=recipient,
        role=role,
    ).dump_and_sign(self.device.signing_key)

    # Actually send the command to the backend
    try:
        rep = await self.backend_cmds.realm_update_roles(
            role_certificate, ciphered_recipient_message
        )
    except BackendNotAvailable as exc:
        raise FSBackendOfflineError(str(exc)) from exc
    except BackendConnectionError as exc:
        raise FSError(f"Error while trying to set vlob group roles in backend: {exc}") from exc

    if rep["status"] == "not_allowed":
        # Fix: original message was ungrammatical ("Must be Owner or Manager
        # on the workspace is mandatory to share it")
        raise FSSharingNotAllowedError(
            f"Must be Owner or Manager on the workspace to share it: {rep}"
        )
    elif rep["status"] == "in_maintenance":
        raise FSWorkspaceInMaintenance(
            f"Cannot share workspace while it is in maintenance: {rep}"
        )
    elif rep["status"] == "already_granted":
        # Stay idempotent
        return
    elif rep["status"] != "ok":
        raise FSError(f"Error while trying to set vlob group roles in backend: {rep}")
async def _outbound_sync_inner(self) -> bool:
    """Try to push the local user manifest to the backend.

    Returns:
        True on success (or nothing to sync), False on a concurrency error
        (the caller is expected to merge and retry).
    """
    base_um = self.get_user_manifest()
    if not base_um.need_sync:
        return True

    # Make sure the corresponding realm has been created in the backend
    if base_um.is_placeholder:
        certif = RealmRoleCertificateContent.build_realm_root_certif(
            author=self.device.device_id,
            timestamp=pendulum_now(),
            realm_id=self.device.user_manifest_id,
        ).dump_and_sign(self.device.signing_key)

        try:
            rep = await self.backend_cmds.realm_create(certif)
        except BackendNotAvailable as exc:
            raise FSBackendOfflineError(str(exc)) from exc
        except BackendConnectionError as exc:
            raise FSError(f"Cannot create user manifest's realm in backend: {exc}") from exc

        if rep["status"] == "already_exists":
            # It's possible a previous attempt to create this realm
            # succeeded but we didn't receive the confirmation, hence
            # we play idempotent here.
            pass
        elif rep["status"] != "ok":
            raise FSError(f"Cannot create user manifest's realm in backend: {rep}")

    # Sync placeholders
    for w in base_um.workspaces:
        await self._workspace_minimal_sync(w)

    # Build vlob
    now = pendulum_now()
    to_sync_um = base_um.to_remote(author=self.device.device_id, timestamp=now)
    ciphered = to_sync_um.dump_sign_and_encrypt(
        author_signkey=self.device.signing_key, key=self.device.user_manifest_key
    )

    # Sync the vlob with backend
    try:
        # Note encryption_revision is always 1 given we never reencrypt
        # the user manifest's realm
        if to_sync_um.version == 1:
            rep = await self.backend_cmds.vlob_create(
                self.user_manifest_id, 1, self.user_manifest_id, now, ciphered
            )
        else:
            rep = await self.backend_cmds.vlob_update(
                1, self.user_manifest_id, to_sync_um.version, now, ciphered
            )
    except BackendNotAvailable as exc:
        raise FSBackendOfflineError(str(exc)) from exc
    except BackendConnectionError as exc:
        raise FSError(f"Cannot sync user manifest: {exc}") from exc

    if rep["status"] in ("already_exists", "bad_version"):
        # Concurrency error (handled by the caller)
        return False
    elif rep["status"] == "in_maintenance":
        raise FSWorkspaceInMaintenance(
            f"Cannot modify workspace data while it is in maintenance: {rep}"
        )
    elif rep["status"] != "ok":
        raise FSError(f"Cannot sync user manifest: {rep}")

    # Merge back the manifest in local
    async with self._update_user_manifest_lock:
        diverged_um = self.get_user_manifest()
        # Final merge could have been achieved by a concurrent operation
        if to_sync_um.version > diverged_um.base_version:
            merged_um = merge_local_user_manifests(diverged_um, to_sync_um)
            await self.set_user_manifest(merged_um)
        self.event_bus.send("fs.entry.synced", id=self.user_manifest_id)

    return True