def insert_graph_receipt_txn(
    self,
    txn,
    room_id: str,
    receipt_type: str,
    user_id: str,
    event_ids: list,
    data: dict,
) -> None:
    """Store an unlinearized ("graph") receipt, replacing any previous
    receipt row for the same (room, receipt type, user).

    Also queues invalidation of the receipt caches to run once the
    transaction commits.

    Args:
        txn: The database transaction.
        room_id: The room the receipt applies to.
        receipt_type: The receipt type.
        user_id: The user who sent the receipt.
        event_ids: The event IDs the receipt points at; stored as JSON.
        data: Free-form receipt data; stored as JSON.
    """
    assert self._can_write_to_receipts
    # Invalidate receipt caches only after the transaction commits, so
    # readers never see the new row before the caches are cleared.
    txn.call_after(self.get_receipts_for_room.invalidate, (room_id, receipt_type))
    txn.call_after(
        self._invalidate_get_users_with_receipts_in_room,
        room_id,
        receipt_type,
        user_id,
    )
    txn.call_after(self.get_receipts_for_user.invalidate, (user_id, receipt_type))
    # FIXME: This shouldn't invalidate the whole cache
    txn.call_after(
        self._get_linearized_receipts_for_room.invalidate_many, (room_id,)
    )

    # Delete-then-insert rather than an upsert: replaces any existing
    # graph receipt for this (room, type, user).
    self.db_pool.simple_delete_txn(
        txn,
        table="receipts_graph",
        keyvalues={
            "room_id": room_id,
            "receipt_type": receipt_type,
            "user_id": user_id,
        },
    )
    self.db_pool.simple_insert_txn(
        txn,
        table="receipts_graph",
        values={
            "room_id": room_id,
            "receipt_type": receipt_type,
            "user_id": user_id,
            "event_ids": json_encoder.encode(event_ids),
            "data": json_encoder.encode(data),
        },
    )
def _insert_graph_receipt_txn(
    self,
    txn: LoggingTransaction,
    room_id: str,
    receipt_type: str,
    user_id: str,
    event_ids: List[str],
    data: JsonDict,
) -> None:
    """Persist an unlinearized (graph) receipt, replacing any existing row
    for the same (room, receipt type, user), and queue cache invalidation
    to run on commit.
    """
    assert self._can_write_to_receipts

    txn.call_after(
        self._get_receipts_for_user_with_orderings.invalidate,
        (user_id, receipt_type),
    )
    # FIXME: This shouldn't invalidate the whole cache
    txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,))

    # The same triple identifies the row to remove and keys the new row.
    receipt_keys = {
        "room_id": room_id,
        "receipt_type": receipt_type,
        "user_id": user_id,
    }
    self.db_pool.simple_delete_txn(
        txn, table="receipts_graph", keyvalues=receipt_keys
    )
    self.db_pool.simple_insert_txn(
        txn,
        table="receipts_graph",
        values={
            **receipt_keys,
            "event_ids": json_encoder.encode(event_ids),
            "data": json_encoder.encode(data),
        },
    )
def get_tag_content(
    txn: LoggingTransaction, tag_ids
) -> List[Tuple[int, Tuple[str, str, str]]]:
    """For each (stream_id, user_id, room_id), fetch that user's tags for
    the room and render them as a single JSON-object string.

    Returns:
        A list of (stream_id, (user_id, room_id, tag_json)) tuples.
    """
    sql = "SELECT tag, content FROM room_tags WHERE user_id=? AND room_id=?"
    results = []
    for stream_id, user_id, room_id in tag_ids:
        txn.execute(sql, (user_id, room_id))
        # `content` is already JSON, so we splice the "key":value pairs
        # together by hand rather than re-parsing and re-encoding.
        pairs = [
            json_encoder.encode(tag) + ":" + content for tag, content in txn
        ]
        tag_json = "{" + ",".join(pairs) + "}"
        results.append((stream_id, (user_id, room_id, tag_json)))
    return results
def __init__(self, hs: "HomeServer"):
    """Pre-compute the static /.well-known/matrix/server response body."""
    super().__init__()
    self._serve_server_wellknown = hs.config.server.serve_server_wellknown

    host, port = parse_server_name(hs.config.server.server_name)

    # If we've got this far, then https://<server_name>/ must route to us, so
    # we just redirect the traffic to port 443 instead of 8448.
    if port is None:
        port = 443

    body = {"m.server": f"{host}:{port}"}
    self._response = json_encoder.encode(body).encode("utf-8")
def to_line(self):
    """Serialise this command to its wire form: "<user_id> <json payload>"."""
    payload = (
        self.access_token,
        self.ip,
        self.user_agent,
        self.device_id,
        self.last_seen,
    )
    return self.user_id + " " + json_encoder.encode(payload)
async def set_push_rule_actions(
    self, user_id, rule_id, actions, is_default_rule
) -> None:
    """Set the `actions` of a push rule and record the change on the
    push-rules stream.

    Args:
        user_id: The user whose rule is being changed.
        rule_id: The ID of the rule.
        actions: The new actions; stored JSON-encoded.
        is_default_rule: If True the rule is a server default, so a dummy
            override row is upserted instead of updating an existing row.
    """
    actions_json = json_encoder.encode(actions)

    def set_push_rule_actions_txn(txn, stream_id, event_stream_ordering):
        if is_default_rule:
            # Add a dummy rule to the rules table with the user specified
            # actions.
            priority_class = -1
            priority = 1
            self._upsert_push_rule_txn(
                txn,
                stream_id,
                event_stream_ordering,
                user_id,
                rule_id,
                priority_class,
                priority,
                "[]",  # no conditions on the dummy rule
                actions_json,
                update_stream=False,
            )
        else:
            self.db_pool.simple_update_one_txn(
                txn,
                "push_rules",
                {"user_name": user_id, "rule_id": rule_id},
                {"actions": actions_json},
            )
        # Record the change on the push-rules stream so other workers /
        # clients see the update.
        self._insert_push_rules_update_txn(
            txn,
            stream_id,
            event_stream_ordering,
            user_id,
            rule_id,
            op="ACTIONS",
            data={"actions": actions_json},
        )

    # Allocate the stream IDs outside the transaction, then apply the
    # change under them.
    with self._push_rules_stream_id_gen.get_next() as ids:
        stream_id, event_stream_ordering = ids
        await self.db_pool.runInteraction(
            "set_push_rule_actions",
            set_push_rule_actions_txn,
            stream_id,
            event_stream_ordering,
        )
async def add_pusher(
    self,
    user_id: str,
    access_token: Optional[int],
    kind: str,
    app_id: str,
    app_display_name: str,
    device_display_name: str,
    pushkey: str,
    pushkey_ts: int,
    lang: Optional[str],
    data: Optional[JsonDict],
    last_stream_ordering: int,
    profile_tag: str = "",
) -> None:
    """Create or replace a pusher for the given user.

    Upserts on (app_id, pushkey, user_name) and invalidates the
    `get_if_user_has_pusher` cache if this may be the user's first pusher.
    """
    async with self._pushers_id_gen.get_next() as stream_id:
        # no need to lock because `pushers` has a unique key on
        # (app_id, pushkey, user_name) so simple_upsert will retry
        await self.db_pool.simple_upsert(
            table="pushers",
            keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
            values={
                "access_token": access_token,
                "kind": kind,
                "app_display_name": app_display_name,
                "device_display_name": device_display_name,
                "ts": pushkey_ts,
                "lang": lang,
                "data": json_encoder.encode(data),
                "last_stream_ordering": last_stream_ordering,
                "profile_tag": profile_tag,
                "id": stream_id,
            },
            desc="add_pusher",
            lock=False,
        )

        # Cheap cache peek (doesn't touch metrics) to decide whether we
        # need to invalidate.
        user_has_pusher = self.get_if_user_has_pusher.cache.get_immediate(
            (user_id,), None, update_metrics=False
        )

        if user_has_pusher is not True:
            # invalidate, since the user might not have had a pusher before
            await self.db_pool.runInteraction(
                "add_pusher",
                self._invalidate_cache_and_stream,  # type: ignore
                self.get_if_user_has_pusher,
                (user_id,),
            )
def update_remote_attestion(self, group_id, user_id, attestation):
    """Update an attestation that a remote has renewed.

    NOTE(review): the method name's spelling ("attestion") is kept — it is
    part of the public interface.
    """
    new_values = {
        "valid_until_ms": attestation["valid_until_ms"],
        "attestation_json": json_encoder.encode(attestation),
    }
    return self.db_pool.simple_update_one(
        table="group_attestations_remote",
        keyvalues={"group_id": group_id, "user_id": user_id},
        updatevalues=new_values,
        desc="update_remote_attestion",
    )
async def add_account_data_to_room(
    self, user_id: str, room_id: str, account_data_type: str, content: JsonDict
) -> int:
    """Add some account_data to a room for a user.

    Args:
        user_id: The user to add a tag for.
        room_id: The room to add a tag for.
        account_data_type: The type of account_data to add.
        content: A json object to associate with the tag.

    Returns:
        The maximum stream ID.
    """
    content_json = json_encoder.encode(content)

    with await self._account_data_id_gen.get_next() as next_id:
        # no need to lock here as room_account_data has a unique constraint
        # on (user_id, room_id, account_data_type) so simple_upsert will
        # retry if there is a conflict.
        await self.db_pool.simple_upsert(
            desc="add_room_account_data",
            table="room_account_data",
            keyvalues={
                "user_id": user_id,
                "room_id": room_id,
                "account_data_type": account_data_type,
            },
            values={"stream_id": next_id, "content": content_json},
            lock=False,
        )

        # it's theoretically possible for the above to succeed and the
        # below to fail - in which case we might reuse a stream id on
        # restart, and the above update might not get propagated. That
        # doesn't sound any worse than the whole update getting lost,
        # which is what would happen if we combined the two into one
        # transaction.
        await self._update_max_stream_id(next_id)

        # Keep the stream cache and per-user caches in step with the new
        # row; prefill avoids an immediate re-read of what we just wrote.
        self._account_data_stream_cache.entity_has_changed(user_id, next_id)
        self.get_account_data_for_user.invalidate((user_id,))
        self.get_account_data_for_room.invalidate((user_id, room_id))
        self.get_account_data_for_room_and_type.prefill(
            (user_id, room_id, account_data_type), content
        )

    return self._account_data_id_gen.get_current_token()
def add_messages_txn(txn, now_ms, stream_id):
    """Transaction: deliver local messages to the local inbox and queue
    remote messages on the federation outbox.
    """
    # Add the local messages directly to the local inbox.
    self._add_messages_to_local_device_inbox_txn(
        txn, stream_id, local_messages_by_user_then_device
    )

    # Add the remote messages to the federation outbox. We'll send them
    # to a remote server when we next send a federation transaction to
    # that destination.
    sql = (
        "INSERT INTO device_federation_outbox"
        " (destination, stream_id, queued_ts, messages_json)"
        " VALUES (?,?,?,?)"
    )
    rows = [
        (destination, stream_id, now_ms, json_encoder.encode(edu))
        for destination, edu in remote_messages_by_destination.items()
    ]
    txn.executemany(sql, rows)
def _serialize_action(actions, is_highlight):
    """Custom serializer for actions. This allows us to "compress" common actions.

    We use the fact that most users have the same actions for notifs (and for
    highlights). We store these default actions as the empty string rather than
    the full JSON. Since the empty string isn't valid JSON there is no risk of
    this clashing with any real JSON actions
    """
    default = DEFAULT_HIGHLIGHT_ACTION if is_highlight else DEFAULT_NOTIF_ACTION
    if actions == default:
        # We use empty string as the column is non-NULL
        return ""
    return json_encoder.encode(actions)
async def add_account_data_to_room(
    self, user_id: str, room_id: str, account_data_type: str, content: JsonDict
) -> int:
    """Add some account_data to a room for a user.

    Args:
        user_id: The user to add a tag for.
        room_id: The room to add a tag for.
        account_data_type: The type of account_data to add.
        content: A json object to associate with the tag.

    Returns:
        The maximum stream ID.
    """
    assert self._can_write_to_account_data
    assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator)

    row_keys = {
        "user_id": user_id,
        "room_id": room_id,
        "account_data_type": account_data_type,
    }

    async with self._account_data_id_gen.get_next() as next_id:
        # no need to lock here as room_account_data has a unique constraint
        # on (user_id, room_id, account_data_type) so simple_upsert will
        # retry if there is a conflict.
        await self.db_pool.simple_upsert(
            desc="add_room_account_data",
            table="room_account_data",
            keyvalues=row_keys,
            values={
                "stream_id": next_id,
                "content": json_encoder.encode(content),
            },
            lock=False,
        )

        # Keep the stream cache and per-user caches in step with the write.
        self._account_data_stream_cache.entity_has_changed(user_id, next_id)
        self.get_account_data_for_user.invalidate((user_id,))
        self.get_account_data_for_room.invalidate((user_id, room_id))
        self.get_account_data_for_room_and_type.prefill(
            (user_id, room_id, account_data_type), content
        )

    return self._account_data_id_gen.get_current_token()
def _add_user_to_group_txn(txn):
    """Transaction: record the user's group membership, drop any pending
    invite, and store the local/remote attestations if present.

    NOTE(review): relies on closure variables (`group_id`, `user_id`,
    `is_admin`, `is_public`, `local_attestation`, `remote_attestation`)
    from the enclosing method.
    """
    member_keys = {"group_id": group_id, "user_id": user_id}

    self.db_pool.simple_insert_txn(
        txn,
        table="group_users",
        values={
            **member_keys,
            "is_admin": is_admin,
            "is_public": is_public,
        },
    )

    # Joining the group consumes any outstanding invite.
    self.db_pool.simple_delete_txn(
        txn, table="group_invites", keyvalues=member_keys
    )

    if local_attestation:
        self.db_pool.simple_insert_txn(
            txn,
            table="group_attestations_renewals",
            values={
                **member_keys,
                "valid_until_ms": local_attestation["valid_until_ms"],
            },
        )
    if remote_attestation:
        self.db_pool.simple_insert_txn(
            txn,
            table="group_attestations_remote",
            values={
                **member_keys,
                "valid_until_ms": remote_attestation["valid_until_ms"],
                "attestation_json": json_encoder.encode(remote_attestation),
            },
        )
async def add_account_data_for_user(
    self, user_id: str, account_data_type: str, content: JsonDict
) -> int:
    """Add some global account_data for a user.

    (Note: despite the similarity to `add_account_data_to_room`, this writes
    to the per-user `account_data` table, not a per-room one.)

    Args:
        user_id: The user to add the account_data for.
        account_data_type: The type of account_data to add.
        content: A json object to associate with the account_data.

    Returns:
        The maximum stream ID.
    """
    content_json = json_encoder.encode(content)

    with await self._account_data_id_gen.get_next() as next_id:
        # no need to lock here as account_data has a unique constraint on
        # (user_id, account_data_type) so simple_upsert will retry if
        # there is a conflict.
        await self.db_pool.simple_upsert(
            desc="add_user_account_data",
            table="account_data",
            keyvalues={"user_id": user_id, "account_data_type": account_data_type},
            values={"stream_id": next_id, "content": content_json},
            lock=False,
        )

        # it's theoretically possible for the above to succeed and the
        # below to fail - in which case we might reuse a stream id on
        # restart, and the above update might not get propagated. That
        # doesn't sound any worse than the whole update getting lost,
        # which is what would happen if we combined the two into one
        # transaction.
        #
        # Note: This is only here for backwards compat to allow admins to
        # roll back to a previous Synapse version. Next time we update the
        # database version we can remove this table.
        await self._update_max_stream_id(next_id)

        # Keep the stream cache and per-user caches in step with the write.
        self._account_data_stream_cache.entity_has_changed(user_id, next_id)
        self.get_account_data_for_user.invalidate((user_id,))
        self.get_global_account_data_by_type_for_user.invalidate(
            (account_data_type, user_id)
        )

    return self._account_data_id_gen.get_current_token()
def _background_update_progress_txn(self, txn, update_name: str, progress) -> None:
    """Update the progress of a background update

    Args:
        txn(cursor): The transaction.
        update_name(str): The name of the background update task
        progress(dict): The progress of the update; stored JSON-encoded.
    """
    progress_json = json_encoder.encode(progress)

    self.db_pool.simple_update_one_txn(
        txn,
        "background_updates",
        keyvalues={"update_name": update_name},
        updatevalues={"progress_json": progress_json},
    )
def _set_e2e_fallback_keys_txn(
    self,
    txn: LoggingTransaction,
    user_id: str,
    device_id: str,
    fallback_keys: JsonDict,
) -> None:
    """Transaction: store the uploaded fallback key(s) for a device,
    leaving the row untouched if an identical key is already stored.
    """
    # fallback_keys will usually only have one item in it, so using a for
    # loop (as opposed to calling simple_upsert_many_txn) won't be too bad
    # FIXME: make sure that only one key per algorithm is uploaded
    for key_id, fallback_key in fallback_keys.items():
        algorithm, key_id = key_id.split(":", 1)
        row_keys = {
            "user_id": user_id,
            "device_id": device_id,
            "algorithm": algorithm,
        }

        old_key_json = self.db_pool.simple_select_one_onecol_txn(
            txn,
            table="e2e_fallback_keys_json",
            keyvalues=row_keys,
            retcol="key_json",
            allow_none=True,
        )
        new_key_json = encode_canonical_json(fallback_key).decode("utf-8")

        # If the uploaded key is the same as the current fallback key,
        # don't do anything. This prevents marking the key as unused if it
        # was already used.
        if old_key_json == new_key_json:
            continue

        self.db_pool.simple_upsert_txn(
            txn,
            table="e2e_fallback_keys_json",
            keyvalues=row_keys,
            values={
                "key_id": key_id,
                "key_json": json_encoder.encode(fallback_key),
                "used": False,
            },
        )
async def create_session(
    self, session_type: str, value: JsonDict, expiry_ms: int
) -> str:
    """
    Creates a new pagination session for the room hierarchy endpoint.

    Args:
        session_type: The type for this session.
        value: The value to store.
        expiry_ms: How long before an item is evicted from the cache
            in milliseconds. Default is 0, indicating items never get
            evicted based on time.

    Returns:
        The newly created session ID.

    Raises:
        StoreError if a unique session ID cannot be generated.
    """
    # autogen a session ID and try to create it. We may clash, so just
    # try a few times till one goes through, giving up eventually.
    for _ in range(5):
        session_id = stringutils.random_string(24)
        try:
            await self.db_pool.simple_insert(
                table="sessions",
                values={
                    "session_id": session_id,
                    "session_type": session_type,
                    "value": json_encoder.encode(value),
                    "expiry_time_ms": self.hs.get_clock().time_msec() + expiry_ms,
                },
                desc="create_session",
            )
            return session_id
        except self.db_pool.engine.module.IntegrityError:
            # ID collision; retry with a fresh one.
            continue

    raise StoreError(500, "Couldn't generate a session ID.")
async def set_ui_auth_clientdict(
    self, session_id: str, clientdict: JsonDict
) -> None:
    """
    Store an updated clientdict for a given session ID.

    Args:
        session_id: The ID of this session as returned from check_auth
        clientdict:
            The dictionary from the client root level, not the 'auth' key.
    """
    await self.db_pool.simple_update_one(
        table="ui_auth_sessions",
        keyvalues={"session_id": session_id},
        # The clientdict gets stored as JSON.
        updatevalues={"clientdict": json_encoder.encode(clientdict)},
        desc="set_ui_auth_client_dict",
    )
def respond_with_json(
    request: Request,
    code: int,
    json_object: Any,
    send_cors: bool = False,
    pretty_print: bool = False,
    canonical_json: bool = True,
):
    """Sends encoded JSON in response to the given request.

    Args:
        request: The http request to respond to.
        code: The HTTP response code.
        json_object: The object to serialize to JSON.
        send_cors: Whether to send Cross-Origin Resource Sharing headers
            https://fetch.spec.whatwg.org/#http-cors-protocol
        pretty_print: Whether to include indentation and line-breaks in the
            resulting JSON bytes.
        canonical_json: Whether to use the canonicaljson algorithm when encoding
            the JSON bytes.

    Returns:
        twisted.web.server.NOT_DONE_YET if the request is still active.
    """
    # could alternatively use request.notifyFinish() and flip a flag when
    # the Deferred fires, but since the flag is RIGHT THERE it seems like
    # a waste.
    if request._disconnected:
        logger.warning(
            "Not sending response to request %s, already disconnected.", request
        )
        return None

    if pretty_print:
        json_bytes = encode_pretty_printed_json(json_object) + b"\n"
    elif canonical_json or synapse.events.USE_FROZEN_DICTS:
        # canonicaljson already encodes to bytes
        json_bytes = encode_canonical_json(json_object)
    else:
        json_bytes = json_encoder.encode(json_object).encode("utf-8")

    return respond_with_json_bytes(request, code, json_bytes, send_cors=send_cors)
def _update_remote_device_list_cache_txn(
    self,
    txn: LoggingTransaction,
    user_id: str,
    devices: List[dict],
    stream_id: int,
) -> None:
    """Transaction: replace the whole cached device list for a remote user
    and record the stream ID we have caught up to.
    """
    # Drop the old cache entries and write the fresh list in one go.
    self.db_pool.simple_delete_txn(
        txn, table="device_lists_remote_cache", keyvalues={"user_id": user_id}
    )
    rows = []
    for content in devices:
        rows.append(
            {
                "user_id": user_id,
                "device_id": content["device_id"],
                "content": json_encoder.encode(content),
            }
        )
    self.db_pool.simple_insert_many_txn(
        txn, table="device_lists_remote_cache", values=rows
    )

    txn.call_after(self.get_cached_devices_for_user.invalidate, (user_id,))
    txn.call_after(self._get_cached_user_device.invalidate_many, (user_id,))
    txn.call_after(
        self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,)
    )

    self.db_pool.simple_upsert_txn(
        txn,
        table="device_lists_remote_extremeties",
        keyvalues={"user_id": user_id},
        values={"stream_id": stream_id},
        # we don't need to lock, because we can assume we are the only thread
        # updating this user's extremity.
        lock=False,
    )

    # If we're replacing the remote user's device list cache presumably
    # we've done a full resync, so we remove the entry that says we need
    # to resync
    self.db_pool.simple_delete_txn(
        txn, table="device_lists_remote_resync", keyvalues={"user_id": user_id}
    )
def _background_update_progress_txn(
    self, txn: "LoggingTransaction", update_name: str, progress: JsonDict
) -> None:
    """Update the progress of a background update

    Args:
        txn: The transaction.
        update_name: The name of the background update task
        progress: The progress of the update.
    """
    self.db_pool.simple_update_one_txn(
        txn,
        "background_updates",
        keyvalues={"update_name": update_name},
        # The progress dict is stored as a JSON blob.
        updatevalues={"progress_json": json_encoder.encode(progress)},
    )
def _update_remote_device_list_cache_entry_txn(
    self,
    txn: LoggingTransaction,
    user_id: str,
    device_id: str,
    content: JsonDict,
    stream_id: int,
) -> None:
    """Transaction: update the cached copy of a single remote user's device.

    If `content` is marked `deleted`, the cache row is removed; otherwise
    the row is upserted with the new content. In either case the stream ID
    we have caught up to is recorded in the extremities table.

    Args:
        txn: The database transaction.
        user_id: The remote user whose device is being updated.
        device_id: The device being updated.
        content: The device's content as received over federation.
        stream_id: The device-list stream position of this update.
    """
    if content.get("deleted"):
        self.db_pool.simple_delete_txn(
            txn,
            table="device_lists_remote_cache",
            keyvalues={"user_id": user_id, "device_id": device_id},
        )

        txn.call_after(self.device_id_exists_cache.invalidate, (user_id, device_id))
    else:
        self.db_pool.simple_upsert_txn(
            txn,
            table="device_lists_remote_cache",
            keyvalues={"user_id": user_id, "device_id": device_id},
            values={"content": json_encoder.encode(content)},
            # we don't need to lock, because we assume we are the only thread
            # updating this user's devices.
            lock=False,
        )

    # Queue cache invalidations for commit time.
    txn.call_after(self._get_cached_user_device.invalidate, (user_id, device_id))
    txn.call_after(self.get_cached_devices_for_user.invalidate, (user_id,))
    txn.call_after(
        self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,)
    )

    self.db_pool.simple_upsert_txn(
        txn,
        table="device_lists_remote_extremeties",
        keyvalues={"user_id": user_id},
        values={"stream_id": stream_id},
        # again, we can assume we are the only thread updating this user's
        # extremity.
        lock=False,
    )
async def update_e2e_room_key(
    self,
    user_id: str,
    version: str,
    room_id: str,
    session_id: str,
    room_key: RoomKey,
) -> None:
    """Replaces the encrypted E2E room key for a given session in a given backup

    Args:
        user_id: the user whose backup we're setting
        version: the version ID of the backup we're updating
        room_id: the ID of the room whose keys we're setting
        session_id: the session whose room_key we're setting
        room_key: the room_key being set
    Raises:
        StoreError
    """
    try:
        version_int = int(version)
    except ValueError:
        # Our versions are all ints so if we can't convert it to an integer,
        # it doesn't exist.
        raise StoreError(404, "No backup with that version exists")

    new_values = {
        "first_message_index": room_key["first_message_index"],
        "forwarded_count": room_key["forwarded_count"],
        "is_verified": room_key["is_verified"],
        "session_data": json_encoder.encode(room_key["session_data"]),
    }
    await self.db_pool.simple_update_one(
        table="e2e_room_keys",
        keyvalues={
            "user_id": user_id,
            "version": version_int,
            "room_id": room_id,
            "session_id": session_id,
        },
        updatevalues=new_values,
        desc="update_e2e_room_key",
    )
def add_messages_txn(txn, now_ms, stream_id):
    """Transaction: deliver local messages to the local inbox and queue
    remote messages on the federation outbox.
    """
    # Add the local messages directly to the local inbox.
    self._add_messages_to_local_device_inbox_txn(
        txn, stream_id, local_messages_by_user_then_device
    )

    # Add the remote messages to the federation outbox. We'll send them
    # to a remote server when we next send a federation transaction to
    # that destination.
    outbox_rows = [
        {
            "destination": destination,
            "stream_id": stream_id,
            "queued_ts": now_ms,
            "messages_json": json_encoder.encode(edu),
            "instance_name": self._instance_name,
        }
        for destination, edu in remote_messages_by_destination.items()
    ]
    self.db_pool.simple_insert_many_txn(
        txn, table="device_federation_outbox", values=outbox_rows
    )
def _set_ui_auth_session_data_txn(
    self, txn: LoggingTransaction, session_id: str, key: str, value: Any
) -> None:
    """Transaction: set a single key in the session's server-side dict.

    Reads the stored `serverdict` JSON blob, sets `key` to `value`, and
    writes the whole blob back (read-modify-write within the transaction).

    Args:
        txn: The database transaction.
        session_id: The UI auth session to update.
        key: The key to set in the serverdict.
        value: The value to store; must be JSON-serialisable.
    """
    # Get the current value.
    result = self.db_pool.simple_select_one_txn(
        txn,
        table="ui_auth_sessions",
        keyvalues={"session_id": session_id},
        retcols=("serverdict",),
    )  # type: Dict[str, Any]  # type: ignore

    # Update it and add it back to the database.
    serverdict = db_to_json(result["serverdict"])
    serverdict[key] = value

    self.db_pool.simple_update_one_txn(
        txn,
        table="ui_auth_sessions",
        keyvalues={"session_id": session_id},
        updatevalues={"serverdict": json_encoder.encode(serverdict)},
    )
async def update_e2e_room_keys_version(
    self,
    user_id: str,
    version: str,
    info: Optional[JsonDict] = None,
    version_etag: Optional[int] = None,
) -> None:
    """Update a given backup version

    Args:
        user_id: the user whose backup version we're updating
        version: the version ID of the backup version we're updating
        info: the new backup version info to store. If None, then the backup
            version info is not updated.
        version_etag: etag of the keys in the backup. If None, then the etag
            is not updated.
    """
    updatevalues: Dict[str, object] = {}

    if info is not None and "auth_data" in info:
        updatevalues["auth_data"] = json_encoder.encode(info["auth_data"])
    if version_etag is not None:
        updatevalues["etag"] = version_etag

    if not updatevalues:
        # Nothing to change; skip the query entirely.
        return

    try:
        version_int = int(version)
    except ValueError:
        # Our versions are all ints so if we can't convert it to an integer,
        # it doesn't exist.
        raise StoreError(404, "No backup with that version exists")

    await self.db_pool.simple_update_one(
        table="e2e_room_keys_versions",
        keyvalues={"user_id": user_id, "version": version_int},
        updatevalues=updatevalues,
        desc="update_e2e_room_keys_version",
    )
def _add_user_signature_change_txn(
    self,
    txn: LoggingTransaction,
    from_user_id: str,
    user_ids: List[str],
    stream_id: int,
) -> None:
    """Transaction: record that `from_user_id` signed the given users'
    keys at `stream_id`, and bump the signature stream cache on commit.
    """
    txn.call_after(
        self._user_signature_stream_cache.entity_has_changed,
        from_user_id,
        stream_id,
    )
    row = {
        "stream_id": stream_id,
        "from_user_id": from_user_id,
        "user_ids": json_encoder.encode(user_ids),
    }
    self.db_pool.simple_insert_txn(txn, "user_signature_stream", values=row)
async def set(self, cache_name: str, key: str, value: Any, expiry_ms: int) -> None:
    """Add the key/value to the named cache, with the expiry time given."""
    if self._redis_connection is None:
        return

    set_counter.labels(cache_name).inc()

    # txredisapi requires the value to be string, bytes or numbers, so we
    # encode stuff in JSON.
    encoded_value = json_encoder.encode(value)

    logger.debug("Caching %s %s: %r", cache_name, key, encoded_value)

    deferred = self._redis_connection.set(
        self._get_redis_key(cache_name, key),
        encoded_value,
        pexpire=expiry_ms,
    )
    return await make_deferred_yieldable(deferred)
def _create_appservice_txn( txn: LoggingTransaction) -> AppServiceTransaction: new_txn_id = self._as_txn_seq_gen.get_next_id_txn(txn) # Insert new txn into txn table event_ids = json_encoder.encode([e.event_id for e in events]) txn.execute( "INSERT INTO application_services_txns(as_id, txn_id, event_ids) " "VALUES(?,?,?)", (service.id, new_txn_id, event_ids), ) return AppServiceTransaction( service=service, id=new_txn_id, events=events, ephemeral=ephemeral, to_device_messages=to_device_messages, one_time_key_counts=one_time_key_counts, unused_fallback_keys=unused_fallback_keys, device_list_summary=device_list_summary, )
async def add_e2e_room_keys(self, user_id, version, room_keys):
    """Bulk add room keys to a given backup.

    Args:
        user_id (str): the user whose backup we're adding to
        version (str): the version ID of the backup for the set of keys we're
            adding to
        room_keys (iterable[(str, str, dict)]): the keys to add, in the form
            (roomID, sessionID, keyData)
    """
    values = []
    for (room_id, session_id, room_key) in room_keys:
        values.append(
            {
                "user_id": user_id,
                "version": version,
                "room_id": room_id,
                "session_id": session_id,
                "first_message_index": room_key["first_message_index"],
                "forwarded_count": room_key["forwarded_count"],
                "is_verified": room_key["is_verified"],
                # session_data is an opaque blob from the client; stored as JSON.
                "session_data": json_encoder.encode(room_key["session_data"]),
            }
        )
        log_kv(
            {
                "message": "Set room key",
                "room_id": room_id,
                "session_id": session_id,
                "room_key": room_key,
            }
        )

    # Single bulk insert rather than one query per key.
    await self.db_pool.simple_insert_many(
        table="e2e_room_keys", values=values, desc="add_e2e_room_keys"
    )