def delete_expired_event_txn(txn):
    # Remove the expiry timestamp for this event first. This must happen
    # unconditionally — even if the event itself turns out to be missing —
    # so that we never try to expire the same event again.
    self._delete_event_expiry_txn(txn, event_id)

    if not event:
        # Nothing to prune without the event; the warning plus the expiry-row
        # deletion above is all we can usefully do.
        logger.warning(
            "Can't expire event %s because we don't have it.", event_id
        )
        return

    # Redact the event's content (per its room version) and serialise the
    # result to JSON.
    pruned_json = json_encoder.encode(
        prune_event_dict(event.room_version, event.get_dict())
    )

    # Overwrite the stored JSON in the event_json table with the pruned form.
    self._censor_event_txn(txn, event.event_id, pruned_json)

    # The cached copy of the event is now stale. self.get_event_cache isn't of
    # the right type for self._invalidate_cache_and_stream, so invalidate the
    # local cache and tell replication about it by hand.
    txn.call_after(self._get_event_cache.invalidate, (event.event_id,))
    self._send_invalidation_to_replication(
        txn, "_get_event_cache", (event.event_id,)
    )
def compute_event_signature(
    room_version: RoomVersion,
    event_dict: JsonDict,
    signature_name: str,
    signing_key: SigningKey,
) -> Dict[str, Dict[str, str]]:
    """Compute the signature of the event for the given name and key.

    Args:
        room_version: the version of the room that this event is in.
            (the room version determines the redaction algorithm and hence the
            json to be signed)

        event_dict: The event as a dict

        signature_name: The name of the entity signing the event
            (typically the server's hostname).

        signing_key: The key to sign with

    Returns:
        a dictionary in the same format of an event's signatures field.
    """
    # Only the redacted form of the event is signed, so that signatures stay
    # valid after the event is redacted. BUGFIX: `room_version` was previously
    # dropped here (`prune_event_dict(event_dict)`); it selects the redaction
    # algorithm and must be passed through.
    redact_json = prune_event_dict(room_version, event_dict)

    # These fields are transient and are not covered by the signature.
    redact_json.pop("age_ts", None)
    redact_json.pop("unsigned", None)

    # Guard the debug logs: canonical-JSON encoding is not free, so skip it
    # entirely unless DEBUG logging is enabled.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Signing event: %s", encode_canonical_json(redact_json))
    redact_json = sign_json(redact_json, signature_name, signing_key)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Signed event: %s", encode_canonical_json(redact_json))

    return redact_json["signatures"]
def from_event(
    server_name: str,
    event: EventBase,
    minimum_valid_until_ms: int,
) -> "VerifyJsonRequest":
    """Create a VerifyJsonRequest to verify all signatures on an event
    object for the given server.
    """
    signing_key_ids = list(event.signatures.get(server_name, []))

    # Building the redacted JSON uses considerably more memory than the
    # Event object itself, so wrap it in a callable and produce it only
    # on demand.
    def _redacted_pdu_json():
        return prune_event_dict(event.room_version, event.get_pdu_json())

    return VerifyJsonRequest(
        server_name,
        _redacted_pdu_json,
        minimum_valid_until_ms,
        key_ids=signing_key_ids,
    )
def compute_event_signature(event_dict, signature_name, signing_key):
    """Compute the signature of the event for the given name and key.

    Args:
        event_dict (dict): The event as a dict
        signature_name (str): The name of the entity signing the event
            (typically the server's hostname).
        signing_key (syutil.crypto.SigningKey): The key to sign with

    Returns:
        dict[str, dict[str, str]]: Returns a dictionary in the same format of
            an event's signatures field.
    """
    # Sign the redacted form of the event so that the signature remains valid
    # once the event has been redacted.
    signable = prune_event_dict(event_dict)

    # Transient fields are not part of the signed content.
    for transient_field in ("age_ts", "unsigned"):
        signable.pop(transient_field, None)

    logger.debug("Signing event: %s", encode_canonical_json(signable))
    signable = sign_json(signable, signature_name, signing_key)
    logger.debug("Signed event: %s", encode_canonical_json(signable))

    return signable["signatures"]
async def _censor_redactions(self) -> None:
    """Censors all redactions older than the configured period that haven't
    been censored yet.

    By censor we mean update the event_json table with the redacted event.
    """
    # Censoring is disabled when no retention period is configured.
    if self.hs.config.server.redaction_retention_period is None:
        return

    if not (
        await self.db_pool.updates.has_completed_background_update(
            "redactions_have_censored_ts_idx"
        )
    ):
        # We don't want to run this until the appropriate index has been
        # created.
        return

    # Only redactions received before this timestamp are eligible.
    before_ts = (
        self._clock.time_msec() - self.hs.config.server.redaction_retention_period
    )

    # We fetch all redactions that:
    #   1. have a received_ts from before the cut off, and
    #   2. we haven't yet censored.
    #
    # Note this is a LEFT JOIN, so it returns a row whether or not we still
    # have the original (redacted) event; missing originals are detected via
    # `get_event` below and still marked as checked.
    #
    # This is limited to 100 events to ensure that we don't try and do too
    # much at once. We'll get called again so this should eventually catch
    # up.
    sql = """
        SELECT redactions.event_id, redacts FROM redactions
        LEFT JOIN events AS original_event ON (
            redacts = original_event.event_id
        )
        WHERE NOT have_censored
        AND redactions.received_ts <= ?
        ORDER BY redactions.received_ts ASC
        LIMIT ?
    """

    rows = await self.db_pool.execute(
        "_censor_redactions_fetch", None, sql, before_ts, 100
    )

    # Accumulate (redaction_id, event_id, pruned_json-or-None) tuples; the
    # DB writes happen in a single transaction at the end.
    updates = []

    for redaction_id, event_id in rows:
        redaction_event = await self.get_event(redaction_id, allow_none=True)
        original_event = await self.get_event(
            event_id, allow_rejected=True, allow_none=True
        )

        # If either `get_event` call returns None, or the original event was
        # not actually marked as redacted, the redaction wasn't allowed (or
        # the events are gone). Either way we know that the result won't
        # change, so we mark the fact that we've checked.
        if (
            redaction_event
            and original_event
            and original_event.internal_metadata.is_redacted()
        ):
            # Redaction was allowed
            pruned_json = json_encoder.encode(
                prune_event_dict(
                    original_event.room_version, original_event.get_dict()
                )
            )
        else:
            # Redaction wasn't allowed
            pruned_json = None

        updates.append((redaction_id, event_id, pruned_json))

    def _update_censor_txn(txn):
        # Apply all the pruned JSON (where allowed) and, in every case, mark
        # the redaction as having been checked so it isn't re-fetched.
        for redaction_id, event_id, pruned_json in updates:
            if pruned_json:
                self._censor_event_txn(txn, event_id, pruned_json)

            self.db_pool.simple_update_one_txn(
                txn,
                table="redactions",
                keyvalues={"event_id": redaction_id},
                updatevalues={"have_censored": True},
            )

    await self.db_pool.runInteraction("_update_censor_txn", _update_censor_txn)