class SCHEMA_CLS(BaseSignedDataSchema):
    """Serialization schema for a remote workspace manifest."""

    type = fields.EnumCheckedConstant(ManifestType.WORKSPACE_MANIFEST, required=True)
    id = EntryIDField(required=True)
    # Version 0 means the data is not synchronized
    version = fields.Integer(required=True, validate=validate.Range(min=0))
    created = fields.DateTime(required=True)
    updated = fields.DateTime(required=True)
    children = fields.FrozenMap(
        EntryNameField(validate=validate.Length(min=1, max=256)),
        EntryIDField(required=True),
        required=True,
    )

    @pre_load
    def fix_legacy(self, data: Dict[str, T]) -> Dict[str, T]:
        # Compatibility with versions <= 1.14: an explicit `None` author is
        # mapped back to the legacy placeholder.
        # Use a membership test so a payload *missing* the `author` key fails
        # with a regular validation error (the field is required) instead of
        # an uncaught KeyError raised from this pre-load hook.
        if "author" in data and data["author"] is None:
            data["author"] = LOCAL_AUTHOR_LEGACY_PLACEHOLDER
        return data

    @post_load
    def make_obj(self, data: Dict[str, Any]) -> "WorkspaceManifest":
        # The `type` constant is only a discriminant, not a constructor argument
        data.pop("type")
        return WorkspaceManifest(**data)
class SCHEMA_CLS(BaseSignedDataSchema):
    """Serialization schema for a remote file manifest."""

    type = fields.EnumCheckedConstant(ManifestType.FILE_MANIFEST, required=True)
    id = EntryIDField(required=True)
    parent = EntryIDField(required=True)
    # Version 0 means the data is not synchronized
    version = fields.Integer(required=True, validate=validate.Range(min=0))
    created = fields.DateTime(required=True)
    updated = fields.DateTime(required=True)
    size = fields.Integer(required=True, validate=validate.Range(min=0))
    blocksize = fields.Integer(required=True, validate=validate.Range(min=8))
    blocks = fields.FrozenList(fields.Nested(BlockAccess.SCHEMA_CLS), required=True)

    @pre_load
    def fix_legacy(self, data: Dict[str, T]) -> Dict[str, T]:
        # Compatibility with versions <= 1.14: an explicit `None` author is
        # mapped back to the legacy placeholder.
        # Use a membership test so a payload *missing* the `author` key fails
        # with a regular validation error (the field is required) instead of
        # an uncaught KeyError raised from this pre-load hook.
        if "author" in data and data["author"] is None:
            data["author"] = LOCAL_AUTHOR_LEGACY_PLACEHOLDER
        return data

    @post_load
    def make_obj(self, data: Dict[str, Any]) -> "FileManifest":
        # The `type` constant is only a discriminant, not a constructor argument
        data.pop("type")
        return FileManifest(**data)
class TimestampOutOfBallparkRepSchema(BaseRepSchema): """This schema has been added to API version 2.4 (Parsec v2.7.0). However, it re-uses the `bad_timestamp` status that was used for similar errors in previous backend versions. For compatibility purposes, this schema should be compatible with `ErrorRepSchema` in the sense that: - an `ErrorRepSchema` with status `bad_timestamp` should be able to deserialize into a `TimestampOutOfBallparkRepSchema` - a `TimestampOutOfBallparkRepSchema` should be able to deserialize into an `ErrorRepSchema` with status `bad_timestamp New clients who wishes to use those fields should check for their existence first. TODO: This backward compatibility should be removed once Parsec < 2.4 support is dropped """ # `bad_timestamp` is kept for backward compatibility, # even though `timestamp_out_of_ballpark` would be more explicit status: fields.CheckedConstant = fields.CheckedConstant("bad_timestamp", required=True) ballpark_client_early_offset = fields.Float(required=False, allow_none=False) ballpark_client_late_offset = fields.Float(required=False, allow_none=False) client_timestamp = fields.DateTime(required=False, allow_none=False) backend_timestamp = fields.DateTime(required=False, allow_none=False) @post_load def make_obj(self, data: Dict[str, Any]) -> Dict[str, Any]: # type: ignore[misc] # Cannot use `missing=None` with `allow_none=False` data.setdefault("ballpark_client_early_offset", None) data.setdefault("ballpark_client_late_offset", None) data.setdefault("client_timestamp", None) data.setdefault("backend_timestamp", None) return data
class PkiEnrollmentInfoRepAcceptedSchema(BaseRepSchema):
    """Reply schema for a PKI enrollment info request when the enrollment
    has been accepted."""

    enrollment_status = fields.EnumCheckedConstant(PkiEnrollmentStatus.ACCEPTED, required=True)
    submitted_on = fields.DateTime(required=True)
    accepted_on = fields.DateTime(required=True)
    accepter_der_x509_certificate = fields.Bytes(required=True)
    accept_payload_signature = fields.Bytes(required=True)
    accept_payload = fields.Bytes(required=True)  # Signature should be checked before loading
class DeviceSchema(UnknownCheckedSchema):
    """Schema describing a device along with its certification and
    (optional) revocation information."""

    device_id = fields.DeviceID(required=True)
    created_on = fields.DateTime(required=True)
    # Nullable fields: presumably left as `None` while the device has not
    # been revoked — TODO confirm against the producer of this payload
    revocated_on = fields.DateTime(allow_none=True)
    certified_revocation = fields.Bytes(allow_none=True)
    revocation_certifier = fields.DeviceID(allow_none=True)
    certified_device = fields.Bytes(required=True)
    device_certifier = fields.DeviceID(allow_none=True)
class VlobReadRepSchema(BaseRepSchema):
    """Reply schema for a vlob read request."""

    version = fields.Integer(required=True, validate=_validate_version)
    blob = fields.Bytes(required=True)
    author = DeviceIDField(required=True)
    timestamp = fields.DateTime(required=True)
    # This field is used by the client to figure out if its role certificate
    # cache is up-to-date enough to be able to perform the proper integrity
    # checks on the manifest timestamp.
    # The `missing=None` argument is used to provide compatibility of new
    # clients with old backends.
    # New in API version 2.3 (Parsec 2.6.0)
    author_last_role_granted_on = fields.DateTime(required=False, allow_none=True, missing=None)
class SCHEMA_CLS(BaseSchema):
    """Serialization schema for a workspace entry."""

    name = EntryNameField(required=True)
    id = EntryIDField(required=True)
    key = fields.SecretKey(required=True)
    encryption_revision = fields.Int(required=True, validate=validate.Range(min=0))
    encrypted_on = fields.DateTime(required=True)
    role_cached_on = fields.DateTime(required=True)
    # NOTE(review): `None` role presumably means access to the workspace has
    # been revoked — TODO confirm with RealmRoleField semantics
    role = RealmRoleField(required=True, allow_none=True)

    @post_load
    def make_obj(self, data: Dict[str, Any]) -> "WorkspaceEntry":
        return WorkspaceEntry(**data)
class PkiEnrollmentListItemSchema(BaseSchema):
    """One enrollment item within a PKI enrollment list reply."""

    enrollment_id = fields.UUID(required=True)
    submitted_on = fields.DateTime(required=True)
    submitter_der_x509_certificate = fields.Bytes(required=True)
    submit_payload_signature = fields.Bytes(required=True)
    submit_payload = fields.Bytes(required=True)  # Signature should be checked before loading
class VlobListVersionsRepSchema(BaseRepSchema):
    """Reply schema for a vlob versions listing: maps each version number to
    its (timestamp, author) pair."""

    versions = fields.Map(
        fields.Integer(required=True),
        fields.Tuple(fields.DateTime(required=True), DeviceIDField(required=True)),
        required=True,
    )
class RealmStartReencryptionMaintenanceReqSchema(BaseReqSchema):
    """Request schema to start a reencryption maintenance on a realm."""

    realm_id = fields.UUID(required=True)
    encryption_revision = fields.Integer(required=True)
    timestamp = fields.DateTime(required=True)
    # One opaque (encrypted) message per participant, keyed by user ID
    per_participant_message = fields.Map(UserIDField(), fields.Bytes(required=True), required=True)
class SCHEMA_CLS(BaseSchema):
    """Serialization schema for a local workspace manifest."""

    type = fields.EnumCheckedConstant(LocalManifestType.LOCAL_WORKSPACE_MANIFEST, required=True)
    base = fields.Nested(RemoteWorkspaceManifest.SCHEMA_CLS, required=True)
    need_sync = fields.Boolean(required=True)
    updated = fields.DateTime(required=True)
    children = fields.FrozenMap(EntryNameField(), EntryIDField(required=True), required=True)
    # Confined entries are entries that are meant to stay locally and not be added
    # to the uploaded remote manifest when synchronizing. The criteria for being
    # confined is to have a filename that matched the "prevent sync" pattern at the time of
    # the last change (or when a new filter was successfully applied)
    local_confinement_points = fields.FrozenSet(EntryIDField(required=True))
    # Filtered entries are entries present in the base manifest that are not exposed
    # locally. We keep track of them to remember that those entries have not been
    # deleted locally and hence should be restored when crafting the remote manifest
    # to upload.
    remote_confinement_points = fields.FrozenSet(EntryIDField(required=True))

    @post_load
    def make_obj(self, data):
        data.pop("type")
        # Confinement point fields may be absent from older payloads:
        # default them to empty frozensets
        data.setdefault("local_confinement_points", frozenset())
        data.setdefault("remote_confinement_points", frozenset())
        return LocalWorkspaceManifest(**data)
class SCHEMA_CLS(BaseSignedDataSchema):
    """Serialization schema for a user manifest (string-typed discriminant)."""

    type = fields.CheckedConstant("user_manifest", required=True)
    id = EntryIDField(required=True)
    # Version 0 means the data is not synchronized (hence author should be None)
    version = fields.Integer(required=True, validate=validate.Range(min=0))
    created = fields.DateTime(required=True)
    updated = fields.DateTime(required=True)
    last_processed_message = fields.Integer(required=True, validate=validate.Range(min=0))
    workspaces = fields.List(fields.Nested(WorkspaceEntry.SCHEMA_CLS), required=True)

    @post_load
    def make_obj(self, data):
        # The `type` constant is only a discriminant, not a constructor argument
        data.pop("type")
        return UserManifest(**data)
class VlobReadReqSchema(BaseReqSchema):
    """Request schema for reading a vlob."""

    encryption_revision = fields.Integer(required=True)
    vlob_id = VlobIDField(required=True)
    # NOTE(review): version is nullable — presumably `None` means "latest" —
    # TODO confirm against the backend handler
    version = fields.Integer(
        required=True,
        allow_none=True,
        validate=lambda n: n is None or _validate_version(n),
    )
    timestamp = fields.DateTime(required=True, allow_none=True)
class SCHEMA_CLS(BaseSignedDataSchema):
    """Serialization schema for a workspace manifest (string-typed discriminant)."""

    type = fields.CheckedConstant("workspace_manifest", required=True)
    id = EntryIDField(required=True)
    # Version 0 means the data is not synchronized (hence author should be None)
    version = fields.Integer(required=True, validate=validate.Range(min=0))
    created = fields.DateTime(required=True)
    updated = fields.DateTime(required=True)
    children = fields.FrozenMap(
        EntryNameField(validate=validate.Length(min=1, max=256)),
        EntryIDField(required=True),
        required=True,
    )

    @post_load
    def make_obj(self, data):
        # The `type` constant is only a discriminant, not a constructor argument
        data.pop("type")
        return WorkspaceManifest(**data)
class SCHEMA_CLS(BaseSignedDataSchema):
    """Serialization schema for a file manifest (string-typed discriminant)."""

    type = fields.CheckedConstant("file_manifest", required=True)
    id = EntryIDField(required=True)
    parent = EntryIDField(required=True)
    # Version 0 means the data is not synchronized (hence author should be None)
    version = fields.Integer(required=True, validate=validate.Range(min=0))
    created = fields.DateTime(required=True)
    updated = fields.DateTime(required=True)
    size = fields.Integer(required=True, validate=validate.Range(min=0))
    blocksize = fields.Integer(required=True, validate=validate.Range(min=8))
    blocks = fields.FrozenList(fields.Nested(BlockAccess.SCHEMA_CLS), required=True)

    @post_load
    def make_obj(self, data):
        # The `type` constant is only a discriminant, not a constructor argument
        data.pop("type")
        return FileManifest(**data)
class UserSchema(UnknownCheckedSchema):
    """Schema describing a user, its certification data and its devices."""

    user_id = fields.String(required=True)
    is_admin = fields.Boolean(required=True)
    created_on = fields.DateTime(required=True)
    certified_user = fields.Bytes(required=True)
    # NOTE(review): nullable certifier — presumably `None` for the first
    # (self-certified) user — TODO confirm
    user_certifier = fields.DeviceID(allow_none=True)
    # Devices belonging to this user, keyed by device name
    devices = fields.Map(fields.DeviceName(), fields.Nested(DeviceSchema), required=True)
class VlobCreateReqSchema(BaseReqSchema):
    """Request schema for creating a vlob."""

    realm_id = fields.UUID(required=True)
    encryption_revision = fields.Integer(required=True)
    vlob_id = fields.UUID(required=True)
    # If blob contains a signed message, its timestamp cannot be directly enforced
    # by the backend (given the message is probably also encrypted).
    # Hence the timestamp is passed in clear so backend can reject the message
    # if it considers the timestamp invalid. On top of that each client asking
    # for the message will receive the declared timestamp to check against
    # the actual timestamp within the message.
    timestamp = fields.DateTime(required=True)
    blob = fields.Bytes(required=True)
class SCHEMA_CLS(BaseSchema):
    """Serialization schema for a local workspace manifest (with the
    `speculative` flag introduced in Parsec v1.15)."""

    type = fields.EnumCheckedConstant(LocalManifestType.LOCAL_WORKSPACE_MANIFEST, required=True)
    base = fields.Nested(_PyWorkspaceManifest.SCHEMA_CLS, required=True)
    need_sync = fields.Boolean(required=True)
    updated = fields.DateTime(required=True)
    children = fields.FrozenMap(EntryNameField(), EntryIDField(), required=True)
    # Added in Parsec v1.15
    # Confined entries are entries that are meant to stay locally and not be added
    # to the uploaded remote manifest when synchronizing. The criteria for being
    # confined is to have a filename that matched the "prevent sync" pattern at the time of
    # the last change (or when a new filter was successfully applied)
    local_confinement_points = fields.FrozenSet(
        EntryIDField(), allow_none=False, required=False, missing=frozenset()
    )
    # Added in Parsec v1.15
    # Filtered entries are entries present in the base manifest that are not exposed
    # locally. We keep track of them to remember that those entries have not been
    # deleted locally and hence should be restored when crafting the remote manifest
    # to upload.
    remote_confinement_points = fields.FrozenSet(
        EntryIDField(), allow_none=False, required=False, missing=frozenset()
    )
    # Added in Parsec v1.15
    # Speculative placeholders are created when we want to access a workspace
    # but didn't retrieve manifest data from backend yet. This implies:
    # - non-placeholders cannot be speculative
    # - the only non-speculative placeholder is the placeholder initialized
    #   during the initial workspace creation
    # This speculative information is useful during merge to understand if
    # a data is not present in the placeholder compared with a remote because:
    # a) the data is not locally known (speculative is True)
    # b) the data is known, but has been locally removed (speculative is False)
    # Prevented to be `required=True` by backward compatibility
    speculative = fields.Boolean(allow_none=False, required=False, missing=False)

    @post_load
    def make_obj(self, data):
        # TODO: Ensure non-placeholder cannot be marked speculative
        assert data["speculative"] is False or data["base"].version == 0
        # TODO: Should this assert be in remote workspace manifest definition instead ?
        # TODO: but in theory remote workspace manifest should assert version > 0 !
        assert data["base"].version != 0 or not data["base"].children
        data.pop("type")
        return LocalWorkspaceManifest(**data)
class SCHEMA_CLS(BaseSchema):
    """Serialization schema for a locally-stored pending PKI enrollment."""

    type = fields.CheckedConstant("local_pending_enrollment", required=True)
    x509_certificate = fields.Nested(X509Certificate.SCHEMA_CLS, required=True)
    addr = BackendPkiEnrollmentAddrField(required=True)
    submitted_on = fields.DateTime(required=True)
    enrollment_id = fields.UUID(required=True)
    submit_payload = fields.Nested(PkiEnrollmentSubmitPayload.SCHEMA_CLS, required=True)
    encrypted_key = fields.Bytes(required=True)
    ciphertext = fields.Bytes(required=True)  # An encrypted PendingDeviceKeys

    @post_load
    def make_obj(self, data):
        # `type` is declared `required=True`, so it is guaranteed to be present
        # after a successful load: pop it unconditionally, consistently with
        # the sibling schemas (the previous `pop("type", None)` default was
        # dead code).
        data.pop("type")
        return LocalPendingEnrollment(**data)
class SCHEMA_CLS(BaseSignedDataSchema):
    """Serialization schema for a remote user manifest."""

    type = fields.EnumCheckedConstant(ManifestType.USER_MANIFEST, required=True)
    id = EntryIDField(required=True)
    # Version 0 means the data is not synchronized
    version = fields.Integer(required=True, validate=validate.Range(min=0))
    created = fields.DateTime(required=True)
    updated = fields.DateTime(required=True)
    last_processed_message = fields.Integer(required=True, validate=validate.Range(min=0))
    workspaces = fields.List(fields.Nested(WorkspaceEntry.SCHEMA_CLS), required=True)

    @pre_load
    def fix_legacy(self, data: Dict[str, T]) -> Dict[str, T]:
        # Compatibility with versions <= 1.14: an explicit `None` author is
        # mapped back to the legacy placeholder.
        # Use a membership test so a payload *missing* the `author` key fails
        # with a regular validation error (the field is required) instead of
        # an uncaught KeyError raised from this pre-load hook.
        if "author" in data and data["author"] is None:
            data["author"] = LOCAL_AUTHOR_LEGACY_PLACEHOLDER
        return data

    @post_load
    def make_obj(self, data: Dict[str, Any]) -> "UserManifest":
        # The `type` constant is only a discriminant, not a constructor argument
        data.pop("type")
        return UserManifest(**data)
class SCHEMA_CLS(BaseSchema):
    """Serialization schema for a local user manifest."""

    type = fields.EnumCheckedConstant(LocalManifestType.LOCAL_USER_MANIFEST, required=True)
    base = fields.Nested(RemoteUserManifest.SCHEMA_CLS, required=True)
    need_sync = fields.Boolean(required=True)
    updated = fields.DateTime(required=True)
    last_processed_message = fields.Integer(required=True, validate=validate.Range(min=0))
    workspaces = fields.FrozenList(fields.Nested(WorkspaceEntry.SCHEMA_CLS), required=True)

    @post_load
    def make_obj(self, data):
        # The `type` constant is only a discriminant, not a constructor argument
        data.pop("type")
        return LocalUserManifest(**data)
class SCHEMA_CLS(BaseSignedDataSchema):
    """Serialization schema for a "sharing granted" message (string-typed
    discriminant)."""

    type = fields.CheckedConstant("sharing.granted", required=True)
    name = fields.String(required=True)
    id = EntryIDField(required=True)
    encryption_revision = fields.Integer(required=True)
    encrypted_on = fields.DateTime(required=True)
    key = fields.SecretKey(required=True)
    # Don't include role given the only reliable way to get this information
    # is to fetch the realm role certificate from the backend.
    # Besides, we will also need the message sender's realm role certificate
    # to make sure he is an owner.

    @post_load
    def make_obj(self, data):
        # The `type` constant is only a discriminant, not a constructor argument
        data.pop("type")
        return SharingGrantedMessageContent(**data)
class SCHEMA_CLS(BaseSchema):
    """Serialization schema for a local file manifest."""

    type = fields.EnumCheckedConstant(LocalManifestType.LOCAL_FILE_MANIFEST, required=True)
    base = fields.Nested(RemoteFileManifest.SCHEMA_CLS, required=True)
    need_sync = fields.Boolean(required=True)
    updated = fields.DateTime(required=True)
    size = fields.Integer(required=True, validate=validate.Range(min=0))
    blocksize = fields.Integer(required=True, validate=validate.Range(min=8))
    # A frozen list of blocks, each block being itself a frozen list of chunks
    blocks = fields.FrozenList(fields.FrozenList(fields.Nested(Chunk.SCHEMA_CLS)), required=True)

    @post_load
    def make_obj(self, data):
        # The `type` constant is only a discriminant, not a constructor argument
        data.pop("type")
        return LocalFileManifest(**data)
class SCHEMA_CLS(BaseSignedDataSchema):
    """Serialization schema for a "sharing granted" message (enum-typed
    discriminant)."""

    type = fields.EnumCheckedConstant(MessageContentType.SHARING_GRANTED, required=True)
    name = EntryNameField(required=True)
    id = EntryIDField(required=True)
    encryption_revision = fields.Integer(required=True)
    encrypted_on = fields.DateTime(required=True)
    key = fields.SecretKey(required=True)
    # Don't include role given the only reliable way to get this information
    # is to fetch the realm role certificate from the backend.
    # Besides, we will also need the message sender's realm role certificate
    # to make sure he is an owner.

    @post_load
    def make_obj(  # type: ignore[misc]
        self, data: Dict[str, Any]
    ) -> "SharingGrantedMessageContent":
        # The `type` constant is only a discriminant, not a constructor argument
        data.pop("type")
        return SharingGrantedMessageContent(**data)
class HandshakeChallengeSchema(BaseSchema):
    """Schema for the challenge message sent by the backend during handshake."""

    handshake = fields.CheckedConstant("challenge", required=True)
    challenge = fields.Bytes(required=True)
    supported_api_versions = fields.List(ApiVersionField(), required=True)
    # Those fields have been added to API version 2.4 (Parsec 2.7.0)
    # They are provided to the client in order to allow them to detect whether
    # their system clock is out of sync and let them close the connection.
    # They will be missing for older backend so they cannot be strictly required.
    # TODO: This backward compatibility should be removed once Parsec < 2.4 support is dropped
    ballpark_client_early_offset = fields.Float(required=False, allow_none=False)
    ballpark_client_late_offset = fields.Float(required=False, allow_none=False)
    backend_timestamp = fields.DateTime(required=False, allow_none=False)

    @post_load
    def make_obj(self, data: Dict[str, Any]) -> Dict[str, Any]:  # type: ignore[misc]
        # Cannot use `missing=None` with `allow_none=False`, so the optional
        # fields are defaulted to `None` by hand after a successful load
        data.setdefault("ballpark_client_early_offset", None)
        data.setdefault("ballpark_client_late_offset", None)
        data.setdefault("backend_timestamp", None)
        return data
class SCHEMA_CLS(OneOfSchema, BaseSchema):
    """Polymorphic schema dispatching on the `type` field to the concrete
    local manifest schema."""

    # Name of the field carrying the discriminant value
    type_field = "type"
    base = fields.Nested(BaseRemoteManifest.SCHEMA_CLS, required=True)
    need_sync = fields.Boolean(required=True)
    updated = fields.DateTime(required=True)

    @property
    def type_schemas(self):
        # Maps each discriminant value to the schema handling that variant
        return {
            LocalManifestType.LOCAL_FILE_MANIFEST: LocalFileManifest.SCHEMA_CLS,
            LocalManifestType.LOCAL_FOLDER_MANIFEST: LocalFolderManifest.SCHEMA_CLS,
            LocalManifestType.LOCAL_WORKSPACE_MANIFEST: LocalWorkspaceManifest.SCHEMA_CLS,
            LocalManifestType.LOCAL_USER_MANIFEST: LocalUserManifest.SCHEMA_CLS,
        }

    def get_obj_type(self, obj):
        # Extract the discriminant from the object being serialized
        return obj["type"]
class SCHEMA_CLS(BaseSchema):
    """Serialization schema for a local user manifest (with the `speculative`
    flag introduced in Parsec v1.15)."""

    type = fields.EnumCheckedConstant(LocalManifestType.LOCAL_USER_MANIFEST, required=True)
    base = fields.Nested(_PyUserManifest.SCHEMA_CLS, required=True)
    need_sync = fields.Boolean(required=True)
    updated = fields.DateTime(required=True)
    last_processed_message = fields.Integer(required=True, validate=validate.Range(min=0))
    workspaces = fields.FrozenList(fields.Nested(_PyWorkspaceEntry.SCHEMA_CLS), required=True)
    # Added in Parsec v1.15
    # Speculative placeholders are created when we want to access the
    # user manifest but didn't retrieve it from backend yet. This implies:
    # - non-placeholders cannot be speculative
    # - the only non-speculative placeholder is the placeholder initialized
    #   during the initial user claim (by opposition of subsequent device
    #   claims on the same user)
    # This speculative information is useful during merge to understand if
    # a data is not present in the placeholder compared with a remote because:
    # a) the data is not locally known (speculative is True)
    # b) the data is known, but has been locally removed (speculative is False)
    # Prevented to be `required=True` by backward compatibility
    speculative = fields.Boolean(allow_none=False, required=False, missing=False)

    @post_load
    def make_obj(self, data):
        data.pop("type")
        # TODO: Ensure non-placeholder cannot be marked speculative
        assert data["speculative"] is False or data["base"].version == 0
        # TODO: Should this assert be in remote workspace manifest definition instead ?
        # TODO: but in theory remote workspace manifest should assert version > 0 !
        assert data["base"].version != 0 or not data["base"].workspaces
        return LocalUserManifest(**data)
class BaseSignedDataSchema(BaseSchema):
    """Base schema for signed payloads: every signed piece of data carries
    its author and a timestamp."""

    author = DeviceIDField(required=True, allow_none=False)
    timestamp = fields.DateTime(required=True)
class APIV1_OrganizationStatusRepSchema(BaseRepSchema):
    """Reply schema for an organization status request (API v1)."""

    is_bootstrapped = fields.Boolean(required=True)
    # `None` expiration date is allowed (field is optional and nullable)
    expiration_date = fields.DateTime(allow_none=True, required=False)
    user_profile_outsider_allowed = fields.Boolean(required=True)
class APIV1_OrganizationCreateReqSchema(BaseReqSchema):
    """Request schema for creating an organization (API v1)."""

    organization_id = OrganizationIDField(required=True)
    # `None` expiration date is allowed (field is optional and nullable)
    expiration_date = fields.DateTime(allow_none=True, required=False)