def get_account_data_for_user_txn(txn):
    rows = self.db_pool.simple_select_list_txn(
        txn,
        "account_data",
        {"user_id": user_id},
        ["account_data_type", "content"],
    )

    global_account_data = {
        row["account_data_type"]: db_to_json(row["content"]) for row in rows
    }

    rows = self.db_pool.simple_select_list_txn(
        txn,
        "room_account_data",
        {"user_id": user_id},
        ["room_id", "account_data_type", "content"],
    )

    by_room = {}
    for row in rows:
        room_data = by_room.setdefault(row["room_id"], {})
        room_data[row["account_data_type"]] = db_to_json(row["content"])

    return global_account_data, by_room
def get_account_data_for_user_txn(
    txn: LoggingTransaction,
) -> Tuple[Dict[str, JsonDict], Dict[str, Dict[str, JsonDict]]]:
    rows = self.db_pool.simple_select_list_txn(
        txn,
        "account_data",
        {"user_id": user_id},
        ["account_data_type", "content"],
    )

    global_account_data = {
        row["account_data_type"]: db_to_json(row["content"]) for row in rows
    }

    rows = self.db_pool.simple_select_list_txn(
        txn,
        "room_account_data",
        {"user_id": user_id},
        ["room_id", "account_data_type", "content"],
    )

    by_room: Dict[str, Dict[str, JsonDict]] = {}
    for row in rows:
        room_data = by_room.setdefault(row["room_id"], {})
        room_data[row["account_data_type"]] = db_to_json(row["content"])

    return global_account_data, by_room
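# Sketch (assumed context, not part of the snippets above): transaction
# callbacks like the two above close over `user_id` from an enclosing store
# method, which defines them inline and drives them via runInteraction. A
# minimal, hypothetical enclosing method would look like:
async def get_account_data_for_user(self, user_id: str):
    def get_account_data_for_user_txn(txn):
        ...  # body as in the snippet above

    return await self.db_pool.runInteraction(
        "get_account_data_for_user", get_account_data_for_user_txn
    )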
def _load_rules(
    rawrules: List[JsonDict],
    enabled_map: Dict[str, bool],
    experimental_config: ExperimentalConfig,
) -> List[JsonDict]:
    ruleslist = []
    for rawrule in rawrules:
        rule = dict(rawrule)
        rule["conditions"] = db_to_json(rawrule["conditions"])
        rule["actions"] = db_to_json(rawrule["actions"])
        rule["default"] = False
        ruleslist.append(rule)

    # We're going to be mutating this a lot, so copy it. We also filter out
    # any experimental default push rules that aren't enabled.
    rules = [
        rule
        for rule in list_with_base_rules(ruleslist)
        if _is_experimental_rule_enabled(rule["rule_id"], experimental_config)
    ]

    for i, rule in enumerate(rules):
        rule_id = rule["rule_id"]

        if rule_id not in enabled_map:
            continue
        if rule.get("enabled", True) == bool(enabled_map[rule_id]):
            continue

        # Rules are cached across users.
        rule = dict(rule)
        rule["enabled"] = bool(enabled_map[rule_id])
        rules[i] = rule

    return rules
@defer.inlineCallbacks
def _get_device_update_edus_by_remote(self, destination, from_stream_id, query_map):
    """Returns a list of device update EDUs as well as E2EE keys

    Args:
        destination (str): The host the device updates are intended for
        from_stream_id (int): The minimum stream_id to filter updates by,
            exclusive
        query_map (Dict[(str, str): int]): Dictionary mapping
            user_id/device_id to update stream_id

    Returns:
        List[Dict]: List of objects representing a device update EDU
    """
    devices = yield self.runInteraction(
        "_get_e2e_device_keys_txn",
        self._get_e2e_device_keys_txn,
        query_map.keys(),
        include_all_devices=True,
        include_deleted_devices=True,
    )

    results = []
    for user_id, user_devices in iteritems(devices):
        # The prev_id for the first row is always the last row before
        # `from_stream_id`
        prev_id = yield self._get_last_device_update_for_remote_user(
            destination, user_id, from_stream_id
        )
        for device_id, device in iteritems(user_devices):
            stream_id = query_map[(user_id, device_id)]
            result = {
                "user_id": user_id,
                "device_id": device_id,
                "prev_id": [prev_id] if prev_id else [],
                "stream_id": stream_id,
            }

            prev_id = stream_id

            if device is not None:
                key_json = device.get("key_json", None)
                if key_json:
                    result["keys"] = db_to_json(key_json)
                device_display_name = device.get("device_display_name", None)
                if device_display_name:
                    result["device_display_name"] = device_display_name
            else:
                result["deleted"] = True

            results.append(result)

    defer.returnValue(results)
def get_new_messages_for_remote_destination_txn(
    txn: LoggingTransaction,
) -> Tuple[List[JsonDict], int]:
    sql = (
        "SELECT stream_id, messages_json FROM device_federation_outbox"
        " WHERE destination = ?"
        " AND ? < stream_id AND stream_id <= ?"
        " ORDER BY stream_id ASC"
        " LIMIT ?"
    )
    txn.execute(sql, (destination, last_stream_id, current_stream_id, limit))

    messages = []
    stream_pos = current_stream_id

    for row in txn:
        stream_pos = row[0]
        messages.append(db_to_json(row[1]))

    # If the limit was not reached we know that there's no more data for this
    # user/device pair up to current_stream_id.
    if len(messages) < limit:
        log_kv({"message": "Set stream position to current position"})
        stream_pos = current_stream_id

    return messages, stream_pos
def _get_received_txn_response(
    self, txn: LoggingTransaction, transaction_id: str, origin: str
) -> Optional[Tuple[int, JsonDict]]:
    result = self.db_pool.simple_select_one_txn(
        txn,
        table="received_transactions",
        keyvalues={"transaction_id": transaction_id, "origin": origin},
        retcols=(
            "transaction_id",
            "origin",
            "ts",
            "response_code",
            "response_json",
            "has_been_referenced",
        ),
        allow_none=True,
    )

    if result and result["response_code"]:
        return result["response_code"], db_to_json(result["response_json"])
    else:
        return None
def _get_devices_with_keys_by_user_txn(self, txn, user_id):
    now_stream_id = self._device_list_id_gen.get_current_token()

    devices = self._get_e2e_device_keys_txn(
        txn, [(user_id, None)], include_all_devices=True
    )

    if devices:
        user_devices = devices[user_id]
        results = []
        for device_id, device in iteritems(user_devices):
            result = {"device_id": device_id}

            key_json = device.get("key_json", None)
            if key_json:
                result["keys"] = db_to_json(key_json)

            device_display_name = device.get("device_display_name", None)
            if device_display_name:
                result["device_display_name"] = device_display_name

            results.append(result)

        return now_stream_id, results

    return now_stream_id, []
def _get_e2e_room_keys_version_info_txn(txn: LoggingTransaction) -> JsonDict:
    if version is None:
        this_version = self._get_current_version(txn, user_id)
    else:
        try:
            this_version = int(version)
        except ValueError:
            # Our versions are all ints so if we can't convert it to an
            # integer, it isn't there.
            raise StoreError(404, "No backup with that version exists")

    result = self.db_pool.simple_select_one_txn(
        txn,
        table="e2e_room_keys_versions",
        keyvalues={"user_id": user_id, "version": this_version, "deleted": 0},
        retcols=("version", "algorithm", "auth_data", "etag"),
        allow_none=False,
    )
    assert result is not None  # see comment on `simple_select_one_txn`
    result["auth_data"] = db_to_json(result["auth_data"])
    result["version"] = str(result["version"])
    if result["etag"] is None:
        result["etag"] = 0
    return result
def _get_devices_with_keys_by_user_txn(self, txn, user_id):
    now_stream_id = self._device_list_id_gen.get_current_token()

    devices = self._get_e2e_device_keys_txn(
        txn, [(user_id, None)], include_all_devices=True
    )

    if devices:
        user_devices = devices[user_id]
        results = []
        for device_id, device in user_devices.items():
            result = {"device_id": device_id}

            key_json = device.get("key_json", None)
            if key_json:
                result["keys"] = db_to_json(key_json)

                if "signatures" in device:
                    for sig_user_id, sigs in device["signatures"].items():
                        result["keys"].setdefault("signatures", {}).setdefault(
                            sig_user_id, {}
                        ).update(sigs)

            device_display_name = device.get("device_display_name", None)
            if device_display_name:
                result["device_display_name"] = device_display_name

            results.append(result)

        return now_stream_id, results

    return now_stream_id, []
async def get_linearized_receipts_for_all_rooms(
    self, to_key: int, from_key: Optional[int] = None
) -> Dict[str, JsonDict]:
    """Get receipts for all rooms between two stream_ids, up to a limit of
    the latest 100 read receipts.

    Args:
        to_key: Max stream id to fetch receipts up to.
        from_key: Min stream id to fetch receipts from. None fetches
            from the start.

    Returns:
        A dictionary of roomids to a list of receipts.
    """

    def f(txn):
        if from_key:
            sql = """
                SELECT * FROM receipts_linearized WHERE
                stream_id > ? AND stream_id <= ?
                ORDER BY stream_id DESC
                LIMIT 100
            """
            txn.execute(sql, [from_key, to_key])
        else:
            sql = """
                SELECT * FROM receipts_linearized WHERE
                stream_id <= ?
                ORDER BY stream_id DESC
                LIMIT 100
            """
            txn.execute(sql, [to_key])

        return self.db_pool.cursor_to_dict(txn)

    txn_results = await self.db_pool.runInteraction(
        "get_linearized_receipts_for_all_rooms", f
    )

    results = {}
    for row in txn_results:
        # We want a single event per room, since we want to batch the
        # receipts by room, event and type.
        room_event = results.setdefault(
            row["room_id"],
            {"type": "m.receipt", "room_id": row["room_id"], "content": {}},
        )

        # The content is of the form:
        # {"$foo:bar": { "read": { "@user:host": <receipt> }, .. }, .. }
        event_entry = room_event["content"].setdefault(row["event_id"], {})
        receipt_type = event_entry.setdefault(row["receipt_type"], {})

        receipt_type[row["user_id"]] = db_to_json(row["data"])

    return results
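# Example (illustrative values, not from the snippets) of one batched receipt
# event produced by the loop above, showing the nested
# room -> event -> receipt-type -> user layout described in the comments:
receipt_edu = {
    "type": "m.receipt",
    "room_id": "!room:example.org",
    "content": {
        "$event1:example.org": {
            "m.read": {
                "@alice:example.org": {"ts": 1661384801651},
            },
        },
    },
}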
def get_rejected_events(
    txn: Cursor,
) -> List[Tuple[str, str, JsonDict, bool, bool]]:
    # Fetch rejected event json, their room version and whether we have
    # inserted them into the state_events or auth_events tables.
    #
    # Note we can assume that events that don't have a corresponding
    # room version are V1 rooms.
    sql = """
        SELECT DISTINCT
            event_id,
            COALESCE(room_version, '1'),
            json,
            state_events.event_id IS NOT NULL,
            event_auth.event_id IS NOT NULL
        FROM rejections
        INNER JOIN event_json USING (event_id)
        LEFT JOIN rooms USING (room_id)
        LEFT JOIN state_events USING (event_id)
        LEFT JOIN event_auth USING (event_id)
        WHERE event_id > ?
        ORDER BY event_id
        LIMIT ?
    """

    txn.execute(sql, (last_event_id, batch_size))

    return [
        (row[0], row[1], db_to_json(row[2]), row[3], row[4])  # type: ignore
        for row in txn
    ]
def run_create(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs):
    logger.info("Creating ignored_users table")
    execute_statements_from_stream(cur, StringIO(_create_commands))

    # We now upgrade existing data, if any. We don't do this in `run_upgrade` as
    # we a) want to run these before adding constraints and b) `run_upgrade` is
    # not run on empty databases.
    insert_sql = """
    INSERT INTO ignored_users (ignorer_user_id, ignored_user_id) VALUES (?, ?)
    """

    logger.info("Converting existing ignore lists")
    cur.execute(
        "SELECT user_id, content FROM account_data WHERE account_data_type = 'm.ignored_user_list'"
    )
    for user_id, content_json in cur.fetchall():
        content = db_to_json(content_json)

        # The content should be the form of a dictionary with a key
        # "ignored_users" pointing to a dictionary with keys of ignored users.
        #
        # {"ignored_users": {"@someone:example.org": {}}}
        ignored_users = content.get("ignored_users", {})
        if isinstance(ignored_users, dict) and ignored_users:
            cur.execute_batch(insert_sql, [(user_id, u) for u in ignored_users])

    # Add indexes after inserting data for efficiency.
    logger.info("Adding constraints to ignored_users table")
    execute_statements_from_stream(cur, StringIO(_constraints_commands))
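# Hypothetical demo (invented values) of the conversion the migration above
# performs: one m.ignored_user_list account_data row fans out into one
# ignored_users row per ignored user ID.
import json

content = json.loads(
    '{"ignored_users": {"@spam:example.org": {}, "@troll:example.org": {}}}'
)
rows = [("@alice:example.org", u) for u in content.get("ignored_users", {})]
# rows == [("@alice:example.org", "@spam:example.org"),
#          ("@alice:example.org", "@troll:example.org")]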
def _get_rooms_for_summary_txn(txn):
    keyvalues = {"group_id": group_id}
    if not include_private:
        keyvalues["is_public"] = True

    sql = """
        SELECT room_id, is_public, category_id, room_order
        FROM group_summary_rooms
        WHERE group_id = ?
        AND room_id IN (
            SELECT group_rooms.room_id FROM group_rooms
            LEFT JOIN room_stats_current ON
                group_rooms.room_id = room_stats_current.room_id
                AND joined_members > 0
                AND local_users_in_room > 0
            LEFT JOIN rooms ON
                group_rooms.room_id = rooms.room_id
                AND (room_version <> '') = ?
        )
    """

    if not include_private:
        sql += " AND is_public = ?"
        txn.execute(sql, (group_id, False, True))
    else:
        txn.execute(sql, (group_id, False))

    rooms = [
        {
            "room_id": row[0],
            "is_public": row[1],
            "category_id": row[2] if row[2] != _DEFAULT_CATEGORY_ID else None,
            "order": row[3],
        }
        for row in txn
    ]

    sql = """
        SELECT category_id, is_public, profile, cat_order
        FROM group_summary_room_categories
        INNER JOIN group_room_categories USING (group_id, category_id)
        WHERE group_id = ?
    """

    if not include_private:
        sql += " AND is_public = ?"
        txn.execute(sql, (group_id, True))
    else:
        txn.execute(sql, (group_id,))

    categories = {
        row[0]: {
            "is_public": row[1],
            "profile": db_to_json(row[2]),
            "order": row[3],
        }
        for row in txn
    }

    return rooms, categories
async def get_users_whose_signatures_changed(
    self, user_id: str, from_key: int
) -> Set[str]:
    """Get the users who have new cross-signing signatures made by `user_id`
    since `from_key`.

    Args:
        user_id: the user who made the signatures
        from_key: The device lists stream token

    Returns:
        A set of user IDs with updated signatures.
    """

    if self._user_signature_stream_cache.has_entity_changed(user_id, from_key):
        sql = """
            SELECT DISTINCT user_ids FROM user_signature_stream
            WHERE from_user_id = ? AND stream_id > ?
        """
        rows = await self.db_pool.execute(
            "get_users_whose_signatures_changed", None, sql, user_id, from_key
        )
        return {user for row in rows for user in db_to_json(row[0])}
    else:
        return set()
def get_all_updated_receipts_txn(
    txn: LoggingTransaction,
) -> Tuple[List[Tuple[int, list]], int, bool]:
    sql = """
        SELECT stream_id, room_id, receipt_type, user_id, event_id, data
        FROM receipts_linearized
        WHERE ? < stream_id AND stream_id <= ?
        ORDER BY stream_id ASC
        LIMIT ?
    """
    txn.execute(sql, (last_id, current_id, limit))

    updates = cast(
        List[Tuple[int, list]],
        [(r[0], r[1:5] + (db_to_json(r[5]),)) for r in txn],
    )

    limited = False
    upper_bound = current_id

    if len(updates) == limit:
        limited = True
        upper_bound = updates[-1][0]

    return updates, upper_bound, limited
def _get_e2e_room_keys_version_info_txn(txn):
    if version is None:
        this_version = self._get_current_version(txn, user_id)
    else:
        try:
            this_version = int(version)
        except ValueError:
            # Our versions are all ints so if we can't convert it to an
            # integer, it isn't there.
            raise StoreError(404, "No row found")

    result = self.db_pool.simple_select_one_txn(
        txn,
        table="e2e_room_keys_versions",
        keyvalues={"user_id": user_id, "version": this_version, "deleted": 0},
        retcols=("version", "algorithm", "auth_data", "etag"),
    )
    result["auth_data"] = db_to_json(result["auth_data"])
    result["version"] = str(result["version"])
    if result["etag"] is None:
        result["etag"] = 0
    return result
async def _get_linearized_receipts_for_room(
    self, room_id: str, to_key: int, from_key: Optional[int] = None
) -> List[dict]:
    """See get_linearized_receipts_for_room"""

    def f(txn):
        if from_key:
            sql = (
                "SELECT * FROM receipts_linearized WHERE"
                " room_id = ? AND stream_id > ? AND stream_id <= ?"
            )
            txn.execute(sql, (room_id, from_key, to_key))
        else:
            sql = (
                "SELECT * FROM receipts_linearized WHERE"
                " room_id = ? AND stream_id <= ?"
            )
            txn.execute(sql, (room_id, to_key))

        rows = self.db_pool.cursor_to_dict(txn)

        return rows

    rows = await self.db_pool.runInteraction("get_linearized_receipts_for_room", f)

    if not rows:
        return []

    content = {}
    for row in rows:
        content.setdefault(row["event_id"], {}).setdefault(row["receipt_type"], {})[
            row["user_id"]
        ] = db_to_json(row["data"])

    return [{"type": "m.receipt", "room_id": room_id, "content": content}]
async def _get_linearized_receipts_for_rooms(
    self, room_ids: Collection[str], to_key: int, from_key: Optional[int] = None
) -> Dict[str, List[JsonDict]]:
    if not room_ids:
        return {}

    def f(txn: LoggingTransaction) -> List[Dict[str, Any]]:
        if from_key:
            sql = """
                SELECT * FROM receipts_linearized WHERE
                stream_id > ? AND stream_id <= ? AND
            """
            clause, args = make_in_list_sql_clause(
                self.database_engine, "room_id", room_ids
            )
            txn.execute(sql + clause, [from_key, to_key] + list(args))
        else:
            sql = """
                SELECT * FROM receipts_linearized WHERE
                stream_id <= ? AND
            """
            clause, args = make_in_list_sql_clause(
                self.database_engine, "room_id", room_ids
            )
            txn.execute(sql + clause, [to_key] + list(args))

        return self.db_pool.cursor_to_dict(txn)

    txn_results = await self.db_pool.runInteraction(
        "_get_linearized_receipts_for_rooms", f
    )

    results: JsonDict = {}
    for row in txn_results:
        # We want a single event per room, since we want to batch the
        # receipts by room, event and type.
        room_event = results.setdefault(
            row["room_id"],
            {"type": EduTypes.RECEIPT, "room_id": row["room_id"], "content": {}},
        )

        # The content is of the form:
        # {"$foo:bar": { "read": { "@user:host": <receipt> }, .. }, .. }
        event_entry = room_event["content"].setdefault(row["event_id"], {})
        receipt_type = event_entry.setdefault(row["receipt_type"], {})

        receipt_type[row["user_id"]] = db_to_json(row["data"])

    results = {
        room_id: [results[room_id]] if room_id in results else []
        for room_id in room_ids
    }
    return results
async def _do_background_update(self, desired_duration_ms: float) -> int:
    assert self._current_background_update is not None
    update_name = self._current_background_update

    logger.info("Starting update batch on background update '%s'", update_name)

    update_handler = self._background_update_handlers[update_name].callback

    performance = self._background_update_performance.get(update_name)

    if performance is None:
        performance = BackgroundUpdatePerformance(update_name)
        self._background_update_performance[update_name] = performance

    items_per_ms = performance.average_items_per_ms()

    if items_per_ms is not None:
        batch_size = int(desired_duration_ms * items_per_ms)
        # Clamp the batch size so that we always make progress
        batch_size = max(
            batch_size,
            await self._min_batch_size(update_name, self._database_name),
        )
    else:
        batch_size = await self._default_batch_size(update_name, self._database_name)

    progress_json = await self.db_pool.simple_select_one_onecol(
        "background_updates",
        keyvalues={"update_name": update_name},
        retcol="progress_json",
    )

    # Avoid a circular import.
    from synapse.storage._base import db_to_json

    progress = db_to_json(progress_json)

    time_start = self._clock.time_msec()
    items_updated = await update_handler(progress, batch_size)
    time_stop = self._clock.time_msec()

    duration_ms = time_stop - time_start

    performance.update(items_updated, duration_ms)

    logger.info(
        "Running background update %r. Processed %r items in %rms."
        " (total_rate=%r/ms, current_rate=%r/ms, total_updated=%r, batch_size=%r)",
        update_name,
        items_updated,
        duration_ms,
        performance.total_items_per_ms(),
        performance.average_items_per_ms(),
        performance.total_item_count,
        batch_size,
    )

    return len(self._background_update_performance)
@defer.inlineCallbacks
def _get_cached_user_device(self, user_id, device_id):
    content = yield self._simple_select_one_onecol(
        table="device_lists_remote_cache",
        keyvalues={"user_id": user_id, "device_id": device_id},
        retcol="content",
        desc="_get_cached_user_device",
    )
    defer.returnValue(db_to_json(content))
@defer.inlineCallbacks
def _get_cached_user_device(self, user_id, device_id):
    content = yield self.db.simple_select_one_onecol(
        table="device_lists_remote_cache",
        keyvalues={"user_id": user_id, "device_id": device_id},
        retcol="content",
        desc="_get_cached_user_device",
    )
    return db_to_json(content)
async def _get_cached_user_device(self, user_id: str, device_id: str) -> JsonDict:
    content = await self.db_pool.simple_select_one_onecol(
        table="device_lists_remote_cache",
        keyvalues={"user_id": user_id, "device_id": device_id},
        retcol="content",
        desc="_get_cached_user_device",
    )
    return db_to_json(content)
def _get_e2e_device_keys_txn(
    self, txn, query_list, include_all_devices=False, include_deleted_devices=False
) -> Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]]:
    """Get information on devices from the database

    The results include the device's keys and self-signatures, but *not* any
    cross-signing signatures which have been added subsequently (for which, see
    get_e2e_device_keys_and_signatures)
    """
    query_clauses = []
    query_params = []

    if include_all_devices is False:
        include_deleted_devices = False

    if include_deleted_devices:
        deleted_devices = set(query_list)

    for (user_id, device_id) in query_list:
        query_clause = "user_id = ?"
        query_params.append(user_id)

        if device_id is not None:
            query_clause += " AND device_id = ?"
            query_params.append(device_id)

        query_clauses.append(query_clause)

    sql = (
        "SELECT user_id, device_id, "
        "    d.display_name, "
        "    k.key_json"
        " FROM devices d"
        "    %s JOIN e2e_device_keys_json k USING (user_id, device_id)"
        " WHERE %s AND NOT d.hidden"
    ) % (
        "LEFT" if include_all_devices else "INNER",
        " OR ".join("(" + q + ")" for q in query_clauses),
    )

    txn.execute(sql, query_params)

    result = {}  # type: Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]]
    for (user_id, device_id, display_name, key_json) in txn:
        if include_deleted_devices:
            deleted_devices.remove((user_id, device_id))
        result.setdefault(user_id, {})[device_id] = DeviceKeyLookupResult(
            display_name, db_to_json(key_json) if key_json else None
        )

    if include_deleted_devices:
        for user_id, device_id in deleted_devices:
            result.setdefault(user_id, {})[device_id] = None

    return result
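# A plausible shape for DeviceKeyLookupResult, which the function above
# constructs per device (assumed for illustration; the real definition is not
# in these snippets): the device's display name plus its decoded key JSON,
# with None keys for a device that never uploaded any.
import attr
from typing import Optional

@attr.s(slots=True, frozen=True, auto_attribs=True)
class DeviceKeyLookupResult:
    display_name: Optional[str]
    keys: Optional[dict]  # decoded key_json, or None if no keys were uploaded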
def reindex_search_txn(txn):
    sql = (
        "SELECT stream_ordering, event_id FROM events"
        " WHERE ? <= stream_ordering AND stream_ordering < ?"
        " ORDER BY stream_ordering DESC"
        " LIMIT ?"
    )

    txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))

    rows = txn.fetchall()
    if not rows:
        return 0

    min_stream_id = rows[-1][0]
    event_ids = [row[1] for row in rows]

    rows_to_update = []

    chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)]
    for chunk in chunks:
        ev_rows = self.db_pool.simple_select_many_txn(
            txn,
            table="event_json",
            column="event_id",
            iterable=chunk,
            retcols=["event_id", "json"],
            keyvalues={},
        )

        for row in ev_rows:
            event_id = row["event_id"]
            event_json = db_to_json(row["json"])
            try:
                origin_server_ts = event_json["origin_server_ts"]
            except (KeyError, AttributeError):
                # If the event is missing a necessary field then
                # skip over it.
                continue

            rows_to_update.append((origin_server_ts, event_id))

    sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?"

    for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE):
        clump = rows_to_update[index : index + INSERT_CLUMP_SIZE]
        txn.executemany(sql, clump)

    progress = {
        "target_min_stream_id_inclusive": target_min_stream_id,
        "max_stream_id_exclusive": min_stream_id,
        "rows_inserted": rows_inserted + len(rows_to_update),
    }

    self.db_pool.updates._background_update_progress_txn(
        txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress
    )

    return len(rows_to_update)
@defer.inlineCallbacks
def _get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None):
    if not room_ids:
        return {}

    def f(txn):
        if from_key:
            sql = """
                SELECT * FROM receipts_linearized WHERE
                stream_id > ? AND stream_id <= ? AND
            """
            clause, args = make_in_list_sql_clause(
                self.database_engine, "room_id", room_ids
            )
            txn.execute(sql + clause, [from_key, to_key] + list(args))
        else:
            sql = """
                SELECT * FROM receipts_linearized WHERE
                stream_id <= ? AND
            """
            clause, args = make_in_list_sql_clause(
                self.database_engine, "room_id", room_ids
            )
            txn.execute(sql + clause, [to_key] + list(args))

        return self.db.cursor_to_dict(txn)

    txn_results = yield self.db.runInteraction(
        "_get_linearized_receipts_for_rooms", f
    )

    results = {}
    for row in txn_results:
        # We want a single event per room, since we want to batch the
        # receipts by room, event and type.
        room_event = results.setdefault(
            row["room_id"],
            {"type": "m.receipt", "room_id": row["room_id"], "content": {}},
        )

        # The content is of the form:
        # {"$foo:bar": { "read": { "@user:host": <receipt> }, .. }, .. }
        event_entry = room_event["content"].setdefault(row["event_id"], {})
        receipt_type = event_entry.setdefault(row["receipt_type"], {})

        receipt_type[row["user_id"]] = db_to_json(row["data"])

    results = {
        room_id: [results[room_id]] if room_id in results else []
        for room_id in room_ids
    }
    return results
def _deserialize_action(actions, is_highlight):
    """Custom deserializer for actions. This allows us to "compress" common actions"""
    if actions:
        return db_to_json(actions)

    if is_highlight:
        return DEFAULT_HIGHLIGHT_ACTION
    else:
        return DEFAULT_NOTIF_ACTION
def _event_thread_relation_txn(txn: LoggingTransaction) -> int:
    txn.execute(
        """
        SELECT event_id, json FROM event_json
        LEFT JOIN event_relations USING (event_id)
        WHERE event_id > ? AND event_relations.event_id IS NULL
        ORDER BY event_id LIMIT ?
        """,
        (last_event_id, batch_size),
    )

    results = list(txn)
    missing_thread_relations = []
    for (event_id, event_json_raw) in results:
        try:
            event_json = db_to_json(event_json_raw)
        except Exception as e:
            logger.warning(
                "Unable to load event %s (no relations will be updated): %s",
                event_id,
                e,
            )
            continue

        # If there's no relation (or it is not a thread), skip!
        relates_to = event_json["content"].get("m.relates_to")
        if not relates_to or not isinstance(relates_to, dict):
            continue
        if relates_to.get("rel_type") != RelationTypes.THREAD:
            continue

        # Get the parent ID.
        parent_id = relates_to.get("event_id")
        if not isinstance(parent_id, str):
            continue

        missing_thread_relations.append((event_id, parent_id))

    # Insert the missing data.
    self.db_pool.simple_insert_many_txn(
        txn=txn,
        table="event_relations",
        values=[
            {
                "event_id": event_id,
                "relates_to_id": parent_id,
                "relation_type": RelationTypes.THREAD,
            }
            for event_id, parent_id in missing_thread_relations
        ],
    )

    if results:
        latest_event_id = results[-1][0]
        self.db_pool.updates._background_update_progress_txn(
            txn, "event_thread_relation", {"last_event_id": latest_event_id}
        )

    return len(results)
def _deserialize_action(actions: str, is_highlight: bool) -> List[Union[dict, str]]:
    """Custom deserializer for actions. This allows us to "compress" common actions"""
    if actions:
        return db_to_json(actions)

    if is_highlight:
        return DEFAULT_HIGHLIGHT_ACTION
    else:
        return DEFAULT_NOTIF_ACTION
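# Hypothetical write-side counterpart to _deserialize_action (assumed, not
# shown in the snippets above): the two common action lists are stored as an
# empty string, so the deserializer can reconstruct them from is_highlight
# alone instead of storing the full JSON for every row.
import json

def _serialize_action(actions, is_highlight):
    if is_highlight:
        if actions == DEFAULT_HIGHLIGHT_ACTION:
            return ""  # flag value; the deserializer restores the default
    else:
        if actions == DEFAULT_NOTIF_ACTION:
            return ""  # flag value; the deserializer restores the default
    return json.dumps(actions)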
async def get_cached_devices_for_user(self, user_id: str) -> Dict[str, JsonDict]:
    devices = await self.db_pool.simple_select_list(
        table="device_lists_remote_cache",
        keyvalues={"user_id": user_id},
        retcols=("device_id", "content"),
        desc="get_cached_devices_for_user",
    )
    return {
        device["device_id"]: db_to_json(device["content"]) for device in devices
    }
@defer.inlineCallbacks
def _get_cached_devices_for_user(self, user_id):
    devices = yield self._simple_select_list(
        table="device_lists_remote_cache",
        keyvalues={"user_id": user_id},
        retcols=("device_id", "content"),
        desc="_get_cached_devices_for_user",
    )
    defer.returnValue(
        {device["device_id"]: db_to_json(device["content"]) for device in devices}
    )
@defer.inlineCallbacks
def get_cached_devices_for_user(self, user_id):
    devices = yield self.db.simple_select_list(
        table="device_lists_remote_cache",
        keyvalues={"user_id": user_id},
        retcols=("device_id", "content"),
        desc="get_cached_devices_for_user",
    )
    return {
        device["device_id"]: db_to_json(device["content"]) for device in devices
    }
def get_updated_account_data_for_user_txn(txn):
    sql = (
        "SELECT account_data_type, content FROM account_data"
        " WHERE user_id = ? AND stream_id > ?"
    )
    txn.execute(sql, (user_id, stream_id))

    global_account_data = {row[0]: db_to_json(row[1]) for row in txn}

    sql = (
        "SELECT room_id, account_data_type, content FROM room_account_data"
        " WHERE user_id = ? AND stream_id > ?"
    )
    txn.execute(sql, (user_id, stream_id))

    account_data_by_room = {}
    for row in txn:
        room_account_data = account_data_by_room.setdefault(row[0], {})
        room_account_data[row[1]] = db_to_json(row[2])

    return global_account_data, account_data_by_room
def _get_devices_by_remote_txn(self, txn, destination, from_stream_id, now_stream_id):
    sql = """
        SELECT user_id, device_id, max(stream_id) FROM device_lists_outbound_pokes
        WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ?
        GROUP BY user_id, device_id
        LIMIT 20
    """
    txn.execute(sql, (destination, from_stream_id, now_stream_id, False))

    # maps (user_id, device_id) -> stream_id
    query_map = {(r[0], r[1]): r[2] for r in txn}
    if not query_map:
        return (now_stream_id, [])

    if len(query_map) >= 20:
        now_stream_id = max(stream_id for stream_id in itervalues(query_map))

    devices = self._get_e2e_device_keys_txn(
        txn,
        query_map.keys(),
        include_all_devices=True,
        include_deleted_devices=True,
    )

    prev_sent_id_sql = """
        SELECT coalesce(max(stream_id), 0) as stream_id
        FROM device_lists_outbound_last_success
        WHERE destination = ? AND user_id = ? AND stream_id <= ?
    """

    results = []
    for user_id, user_devices in iteritems(devices):
        # The prev_id for the first row is always the last row before
        # `from_stream_id`
        txn.execute(prev_sent_id_sql, (destination, user_id, from_stream_id))
        rows = txn.fetchall()
        prev_id = rows[0][0]
        for device_id, device in iteritems(user_devices):
            stream_id = query_map[(user_id, device_id)]
            result = {
                "user_id": user_id,
                "device_id": device_id,
                "prev_id": [prev_id] if prev_id else [],
                "stream_id": stream_id,
            }

            prev_id = stream_id

            if device is not None:
                key_json = device.get("key_json", None)
                if key_json:
                    result["keys"] = db_to_json(key_json)
                device_display_name = device.get("device_display_name", None)
                if device_display_name:
                    result["device_display_name"] = device_display_name
            else:
                result["deleted"] = True

            results.append(result)

    return (now_stream_id, results)