def create_index_sqlite(conn: Connection) -> None:
    # SQLite doesn't support concurrent creation of indexes.
    #
    # We don't use partial indices on SQLite as they weren't introduced
    # until 3.8, and wheezy and CentOS 7 have 3.7.
    #
    # We assume that SQLite doesn't give us invalid indices; however
    # we may still end up with the index existing but the
    # background_updates not having been recorded if synapse got shut
    # down at the wrong moment - hence we use IF NOT EXISTS. (SQLite
    # has supported CREATE TABLE|INDEX IF NOT EXISTS since 3.3.0.)
    sql = (
        "CREATE %(unique)s INDEX IF NOT EXISTS %(name)s ON %(table)s"
        " (%(columns)s)"
    ) % {
        "unique": "UNIQUE" if unique else "",
        "name": index_name,
        "table": table,
        "columns": ", ".join(columns),
    }

    c = conn.cursor()
    logger.debug("[SQL] %s", sql)
    c.execute(sql)

    if replaces_index is not None:
        # We drop the old index as the new index has now been created.
        sql = f"DROP INDEX IF EXISTS {replaces_index}"
        logger.debug("[SQL] %s", sql)
        c.execute(sql)
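
# A quick way to sanity-check the string formatting above, using obviously
# hypothetical values for the closed-over variables (index_name, table,
# columns and unique are bound by the enclosing registration code). Note the
# double space before INDEX when unique is False, which SQLite tolerates:

unique = False
index_name = "foo_idx"
table = "foo"
columns = ["bar", "baz"]

sql = (
    "CREATE %(unique)s INDEX IF NOT EXISTS %(name)s ON %(table)s"
    " (%(columns)s)"
) % {
    "unique": "UNIQUE" if unique else "",
    "name": index_name,
    "table": table,
    "columns": ", ".join(columns),
}
assert sql == "CREATE  INDEX IF NOT EXISTS foo_idx ON foo (bar, baz)"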
def attempt_to_set_isolation_level(
    self, conn: Connection, isolation_level: Optional[int]
):
    if isolation_level is None:
        isolation_level = self.default_isolation_level
    else:
        isolation_level = self.isolation_level_map[isolation_level]
    return conn.set_isolation_level(isolation_level)  # type: ignore
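
# default_isolation_level and isolation_level_map are attributes of the engine
# object. A sketch of what they might contain for a psycopg2-backed engine;
# the psycopg2.extensions constants are real, but the integer keys and the
# default chosen here are assumptions for illustration:

import psycopg2.extensions

default_isolation_level = psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ
isolation_level_map = {
    1: psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED,   # READ COMMITTED
    2: psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ,  # REPEATABLE READ
    3: psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE,     # SERIALIZABLE
}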
def create_index_psql(conn: Connection) -> None:
    conn.rollback()

    # postgres insists on autocommit for the index
    conn.set_session(autocommit=True)  # type: ignore

    try:
        c = conn.cursor()

        # If a previous attempt to create the index was interrupted,
        # we may already have a half-built index. Let's just drop it
        # before trying to create it again.
        sql = "DROP INDEX IF EXISTS %s" % (index_name,)
        logger.debug("[SQL] %s", sql)
        c.execute(sql)

        sql = (
            "CREATE %(unique)s INDEX CONCURRENTLY %(name)s"
            " ON %(table)s"
            " (%(columns)s) %(where_clause)s"
        ) % {
            "unique": "UNIQUE" if unique else "",
            "name": index_name,
            "table": table,
            "columns": ", ".join(columns),
            "where_clause": "WHERE " + where_clause if where_clause else "",
        }
        logger.debug("[SQL] %s", sql)
        c.execute(sql)
    finally:
        conn.set_session(autocommit=False)  # type: ignore
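
# create_index_sqlite and create_index_psql are alternative implementations of
# the same step, so a caller needs to pick one based on the engine in use.
# A minimal sketch of that dispatch; PostgresEngine is the real engine class
# in synapse.storage.engines, but this helper itself is an illustration, not
# the actual call site:

from synapse.storage.engines import PostgresEngine

def create_index(conn: Connection, database_engine) -> None:
    if isinstance(database_engine, PostgresEngine):
        create_index_psql(conn)
    else:
        create_index_sqlite(conn)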
def _get_active_presence(self, db_conn: Connection):
    """Fetch non-offline presence from the database so that we can
    register the appropriate timeouts.
    """
    sql = (
        "SELECT user_id, state, last_active_ts, last_federation_update_ts,"
        " last_user_sync_ts, status_msg, currently_active FROM presence_stream"
        " WHERE state != ?"
    )

    txn = db_conn.cursor()
    txn.execute(sql, (PresenceState.OFFLINE,))
    rows = self.db_pool.cursor_to_dict(txn)
    txn.close()

    for row in rows:
        row["currently_active"] = bool(row["currently_active"])

    return [UserPresenceState(**row) for row in rows]
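
# cursor_to_dict pairs each fetched row with the cursor's column names so the
# rows can be splatted into UserPresenceState above. A minimal sketch of that
# behaviour, relying only on the standard DB-API cursor.description attribute
# (an illustration of the idea, not necessarily the real helper):

from typing import Any, Dict, List

def cursor_to_dict_sketch(cursor) -> List[Dict[str, Any]]:
    # description is a sequence of 7-tuples; the first element of each
    # tuple is the column name
    col_names = [d[0] for d in cursor.description]
    return [dict(zip(col_names, row)) for row in cursor]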
def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool):
    return conn.set_session(autocommit=autocommit)  # type: ignore
def prepare_database(
    db_conn: Connection,
    database_engine: BaseDatabaseEngine,
    config: Optional[HomeServerConfig],
    databases: Collection[str] = ["main", "state"],
):
    """Prepares a physical database for usage. Will either create all necessary
    tables or upgrade from an older schema version.

    If `config` is None then prepare_database will assert that no upgrade is
    necessary, *or* will create a fresh database if the database is empty.

    Args:
        db_conn:
        database_engine:
        config: application config, or None if we are connecting to an existing
            database which we expect to be configured already
        databases: The name of the databases that will be used
            with this physical database. Defaults to all databases.
    """

    try:
        cur = db_conn.cursor()

        # sqlite does not automatically start transactions for DDL / SELECT statements,
        # so we start one before running anything. This ensures that any upgrades
        # are either applied completely, or not at all.
        #
        # (psycopg2 automatically starts a transaction as soon as we run any statements
        # at all, so this is redundant but harmless there.)
        cur.execute("BEGIN TRANSACTION")

        logger.info("%r: Checking existing schema version", databases)
        version_info = _get_or_create_schema_state(cur, database_engine)

        if version_info:
            user_version, delta_files, upgraded = version_info
            logger.info(
                "%r: Existing schema is %i (+%i deltas)",
                databases,
                user_version,
                len(delta_files),
            )

            # config should only be None when we are preparing an in-memory SQLite db,
            # which should be empty.
            if config is None:
                raise ValueError(
                    "config==None in prepare_database, but database is not empty"
                )

            # if it's a worker app, refuse to upgrade the database, to avoid multiple
            # workers doing it at once.
            if config.worker_app is not None and user_version != SCHEMA_VERSION:
                raise UpgradeDatabaseException(
                    OUTDATED_SCHEMA_ON_WORKER_ERROR % (SCHEMA_VERSION, user_version)
                )

            _upgrade_existing_database(
                cur,
                user_version,
                delta_files,
                upgraded,
                database_engine,
                config,
                databases=databases,
            )
        else:
            logger.info("%r: Initialising new database", databases)

            # if it's a worker app, refuse to upgrade the database, to avoid multiple
            # workers doing it at once.
            if config and config.worker_app is not None:
                raise UpgradeDatabaseException(EMPTY_DATABASE_ON_WORKER_ERROR)

            _setup_new_database(cur, database_engine, databases=databases)

        # check if any of our configured dynamic modules want a database
        if config is not None:
            _apply_module_schemas(cur, database_engine, config)

        cur.close()
        db_conn.commit()
    except Exception:
        db_conn.rollback()
        raise
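
# A minimal usage sketch: preparing a fresh in-memory SQLite database with
# config=None, which exercises the "Initialising new database" branch above.
# Sqlite3Engine is the real engine class in synapse.storage.engines, but the
# constructor arguments shown here are an assumption - check the engine module
# for the actual signature:

import sqlite3

from synapse.storage.engines import Sqlite3Engine

db_conn = sqlite3.connect(":memory:")
engine = Sqlite3Engine({"database": ":memory:"})  # hypothetical construction
prepare_database(db_conn, engine, config=None)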