async def has_unique_constraint(self):
    table_name = clear_table_name(self._objects_table_name)
    result = await self.read_conn.fetch('''
        SELECT * FROM pg_indexes
        WHERE tablename = '{}' AND indexname = '{}_parent_id_id_key';
        '''.format(table_name, table_name))
    return len(result) > 0
async def initialize(self, loop=None, **kw):
    self._connection_options = kw
    if self._connection_manager is None:
        self._connection_manager = PGConnectionManager(
            dsn=self._dsn,
            pool_size=self._pool_size,
            connection_options=self._connection_options,
            conn_acquire_timeout=self._conn_acquire_timeout,
            vacuum_class=self._vacuum_class)
    await self._connection_manager.initialize(loop, **kw)

    async with self.lock:
        if await self.has_unique_constraint():
            self._supports_unique_constraints = True

        trash_sql = self._sql.get('CREATE_TRASH', self._objects_table_name)
        try:
            await self.initialize_tid_statements()
            await self.read_conn.execute(trash_sql)
        except asyncpg.exceptions.ReadOnlySQLTransactionError:
            # Not necessary for read-only pg
            pass
        except (asyncpg.exceptions.UndefinedTableError,
                asyncpg.exceptions.InvalidSchemaNameError):
            await self.create()
            # only available on new databases
            await self.read_conn.execute(
                self._unique_constraint.format(
                    objects_table_name=self._objects_table_name,
                    constraint_name=clear_table_name(
                        self._objects_table_name),
                    TRASHED_ID=TRASHED_ID))
            self._supports_unique_constraints = True
            await self.initialize_tid_statements()
            await self.read_conn.execute(trash_sql)

        # migrate to larger VARCHAR size...
        result = await self.read_conn.fetch("""
            select * from information_schema.columns
            where table_name='{}'""".format(self._objects_table_name))
        if len(result) > 0 and result[0][
                'character_maximum_length'] != MAX_UID_LENGTH:
            log.warn('Migrating VARCHAR key length')
            await self.read_conn.execute(f'''
                ALTER TABLE {self._objects_table_name}
                ALTER COLUMN zoid TYPE varchar({MAX_UID_LENGTH})''')
            await self.read_conn.execute(f'''
                ALTER TABLE {self._objects_table_name}
                ALTER COLUMN of TYPE varchar({MAX_UID_LENGTH})''')
            await self.read_conn.execute(f'''
                ALTER TABLE {self._objects_table_name}
                ALTER COLUMN parent_id TYPE varchar({MAX_UID_LENGTH})''')
            await self.read_conn.execute(f'''
                ALTER TABLE {self._blobs_table_name}
                ALTER COLUMN bid TYPE varchar({MAX_UID_LENGTH})''')
            await self.read_conn.execute(f'''
                ALTER TABLE {self._blobs_table_name}
                ALTER COLUMN zoid TYPE varchar({MAX_UID_LENGTH})''')

    self._connection_initialized_on = time.time()
async def create(self, conn=None):
    if conn is None:
        conn = self.read_conn
    # Check DB
    log.info("Creating initial database objects")
    statements = []
    if self._db_schema and self._db_schema != "public":
        statements.extend([f"CREATE SCHEMA IF NOT EXISTS {self._db_schema}"])
    statements.extend(
        [
            get_table_definition(self.objects_table_name, self._object_schema),
            get_table_definition(
                self.blobs_table_name, self._blob_schema,
                primary_keys=("bid", "zoid", "chunk_index")
            ),
        ]
    )
    statements.extend(self._initialize_objects_statements)
    statements.extend(self._initialize_blobs_statements)

    with watch("create_db"):
        for statement in statements:
            otable_name = clear_table_name(self.objects_table_name)
            if otable_name == "objects":
                otable_name = "object"
            btable_name = clear_table_name(self.blobs_table_name)
            if btable_name == "blobs":
                btable_name = "blob"
            statement = statement.format(
                objects_table_name=self.objects_table_name,
                blobs_table_name=self.blobs_table_name,
                # singular, index names
                object_table_name=otable_name,
                blob_table_name=btable_name,
                schema=self._db_schema,
            )
            try:
                await conn.execute(statement)
            except asyncpg.exceptions.UniqueViolationError:
                # this is okay on creation, means 2 getting created at same time
                pass
async def has_unique_constraint(self):
    result = await self.read_conn.fetch('''
        SELECT *
        FROM information_schema.table_constraints AS tc
        JOIN information_schema.key_column_usage AS kcu
            ON tc.constraint_name = kcu.constraint_name
        JOIN information_schema.constraint_column_usage AS ccu
            ON ccu.constraint_name = tc.constraint_name
        WHERE tc.constraint_name = '{}_parent_id_id_key'
            AND tc.constraint_type = 'UNIQUE'
        '''.format(clear_table_name(self._objects_table_name)))
    return len(result) > 0
async def create(self):
    # Check DB
    log.info('Creating initial database objects')
    statements = []
    if self._db_schema and self._db_schema != 'public':
        statements.extend(
            [f'CREATE SCHEMA IF NOT EXISTS {self._db_schema}'])
    statements.extend([
        get_table_definition(self._objects_table_name, self._object_schema),
        get_table_definition(self._blobs_table_name, self._blob_schema,
                             primary_keys=('bid', 'zoid', 'chunk_index'))
    ])
    statements.extend(self._initialize_statements)

    for statement in statements:
        otable_name = clear_table_name(self._objects_table_name)
        if otable_name == 'objects':
            otable_name = 'object'
        btable_name = clear_table_name(self._blobs_table_name)
        if btable_name == 'blobs':
            btable_name = 'blob'
        statement = statement.format(
            objects_table_name=self._objects_table_name,
            blobs_table_name=self._blobs_table_name,
            # singular, index names
            object_table_name=otable_name,
            blob_table_name=btable_name,
            schema=self._db_schema)
        try:
            await self.read_conn.execute(statement)
        except asyncpg.exceptions.UniqueViolationError:
            # this is okay on creation, means 2 getting created at same time
            pass

    await self.initialize_tid_statements()
async def initialize(self, loop=None, **kw):
    self._connection_options = kw
    if self._connection_manager is None:
        self._connection_manager = PGConnectionManager(
            dsn=self._dsn,
            pool_size=self._pool_size,
            connection_options=self._connection_options,
            conn_acquire_timeout=self._conn_acquire_timeout,
            vacuum_class=self._vacuum_class)
    await self._connection_manager.initialize(loop, **kw)

    async with self.lock:
        if await self.has_unique_constraint():
            self._supports_unique_constraints = True

        trash_sql = self._sql.get('CREATE_TRASH', self._objects_table_name)
        try:
            await self.initialize_tid_statements()
            await self.read_conn.execute(trash_sql)
        except asyncpg.exceptions.ReadOnlySQLTransactionError:
            # Not necessary for read-only pg
            pass
        except (asyncpg.exceptions.UndefinedTableError,
                asyncpg.exceptions.InvalidSchemaNameError):
            await self.create()
            # only available on new databases
            await self.read_conn.execute(self._unique_constraint.format(
                objects_table_name=self._objects_table_name,
                constraint_name=clear_table_name(self._objects_table_name),
                TRASHED_ID=TRASHED_ID
            ))
            self._supports_unique_constraints = True
            await self.initialize_tid_statements()
            await self.read_conn.execute(trash_sql)

        # migrate to larger VARCHAR size...
        result = await self.read_conn.fetch("""
            select * from information_schema.columns
            where table_name='{}'""".format(self._objects_table_name))
        if len(result) > 0 and result[0]['character_maximum_length'] != MAX_OID_LENGTH:
            log.warn('Migrating VARCHAR key length')
            await self.read_conn.execute(f'''
                ALTER TABLE {self._objects_table_name}
                ALTER COLUMN zoid TYPE varchar({MAX_OID_LENGTH})''')
            await self.read_conn.execute(f'''
                ALTER TABLE {self._objects_table_name}
                ALTER COLUMN of TYPE varchar({MAX_OID_LENGTH})''')
            await self.read_conn.execute(f'''
                ALTER TABLE {self._objects_table_name}
                ALTER COLUMN parent_id TYPE varchar({MAX_OID_LENGTH})''')
            await self.read_conn.execute(f'''
                ALTER TABLE {self._blobs_table_name}
                ALTER COLUMN bid TYPE varchar({MAX_OID_LENGTH})''')
            await self.read_conn.execute(f'''
                ALTER TABLE {self._blobs_table_name}
                ALTER COLUMN zoid TYPE varchar({MAX_OID_LENGTH})''')

    self._connection_initialized_on = time.time()
async def create(self):
    # Check DB
    log.info('Creating initial database objects')
    statements = []
    if self._db_schema and self._db_schema != 'public':
        statements.extend([f'CREATE SCHEMA IF NOT EXISTS {self._db_schema}'])
    statements.extend([
        get_table_definition(self._objects_table_name, self._object_schema),
        get_table_definition(self._blobs_table_name, self._blob_schema,
                             primary_keys=('bid', 'zoid', 'chunk_index'))
    ])
    statements.extend(self._initialize_statements)

    for statement in statements:
        otable_name = clear_table_name(self._objects_table_name)
        if otable_name == 'objects':
            otable_name = 'object'
        btable_name = clear_table_name(self._blobs_table_name)
        if btable_name == 'blobs':
            btable_name = 'blob'
        statement = statement.format(
            objects_table_name=self._objects_table_name,
            blobs_table_name=self._blobs_table_name,
            # singular, index names
            object_table_name=otable_name,
            blob_table_name=btable_name,
            schema=self._db_schema
        )
        try:
            await self.read_conn.execute(statement)
        except asyncpg.exceptions.UniqueViolationError:
            # this is okay on creation, means 2 getting created at same time
            pass

    await self.initialize_tid_statements()
async def initialize(self, loop=None, **kw):
    self._connection_options = kw
    if self._connection_manager is None:
        self._connection_manager = self._connection_manager_class(
            dsn=self._dsn,
            pool_size=self._pool_size,
            connection_options=self._connection_options,
            conn_acquire_timeout=self._conn_acquire_timeout,
            vacuum_class=self._vacuum_class,
            autovacuum=self._autovacuum,
            db_schema=self._db_schema,
        )
    await self._connection_manager.initialize(loop, **kw)

    with watch("initialize_db"):
        async with self.pool.acquire(
                timeout=self._conn_acquire_timeout) as conn:
            if await self.has_unique_constraint(conn):
                self._supports_unique_constraints = True

            trash_sql = self._sql.get("CREATE_TRASH", self.objects_table_name)
            try:
                await conn.execute(trash_sql)
            except asyncpg.exceptions.ReadOnlySQLTransactionError:
                # Not necessary for read-only pg
                pass
            except (asyncpg.exceptions.UndefinedTableError,
                    asyncpg.exceptions.InvalidSchemaNameError):
                async with conn.transaction():
                    await self.create(conn)
                    # only available on new databases
                    for constraint in self._unique_constraints:
                        await conn.execute(
                            constraint.format(
                                objects_table_name=self.objects_table_name,
                                constraint_name=clear_table_name(
                                    self.objects_table_name),
                                TRASHED_ID=TRASHED_ID,
                            ).replace("CONCURRENTLY", ""))
                    self._supports_unique_constraints = True
                    await conn.execute(trash_sql)

                await notify(StorageCreatedEvent(self, db_conn=conn))

    self._connection_initialized_on = time.time()
async def _migrate_constraint(storage, conn):
    table_name = clear_table_name(storage._objects_table_name)
    result = await conn.fetch("""
        SELECT * FROM pg_indexes
        WHERE tablename = '{}' AND indexname = '{}_parent_id_id_key';
        """.format(table_name, table_name))
    if len(result) > 0:
        # check if we need to drop and create new constraint
        if TRASHED_ID not in result[0]["indexdef"]:  # pragma: no cover
            await conn.execute("""
                ALTER TABLE {} DROP CONSTRAINT {}_parent_id_id_key;
                """.format(storage._objects_table_name, table_name))
            await conn.execute(storage._unique_constraints[0].format(
                objects_table_name=storage._objects_table_name,
                constraint_name=table_name,
                TRASHED_ID=TRASHED_ID,
            ))
async def initialize(self, loop=None, **kw):
    self._connection_options = kw
    if self._connection_manager is None:
        self._connection_manager = PGConnectionManager(
            dsn=self._dsn,
            pool_size=self._pool_size,
            connection_options=self._connection_options,
            conn_acquire_timeout=self._conn_acquire_timeout,
            vacuum_class=self._vacuum_class,
            autovacuum=self._autovacuum,
        )
    await self._connection_manager.initialize(loop, **kw)

    async with self.lock:
        if await self.has_unique_constraint():
            self._supports_unique_constraints = True

        trash_sql = self._sql.get("CREATE_TRASH", self._objects_table_name)
        try:
            await self.initialize_tid_statements()
            await self.read_conn.execute(trash_sql)
        except asyncpg.exceptions.ReadOnlySQLTransactionError:
            # Not necessary for read-only pg
            pass
        except (asyncpg.exceptions.UndefinedTableError,
                asyncpg.exceptions.InvalidSchemaNameError):
            await self.create()
            # only available on new databases
            await self.read_conn.execute(
                self._unique_constraint.format(
                    objects_table_name=self._objects_table_name,
                    constraint_name=clear_table_name(
                        self._objects_table_name),
                    TRASHED_ID=TRASHED_ID,
                ))
            self._supports_unique_constraints = True
            await self.initialize_tid_statements()
            await self.read_conn.execute(trash_sql)
            await notify(StorageCreatedEvent(self))

    self._connection_initialized_on = time.time()
async def migrate_contraint(db):
    storage = db.storage
    if not IPostgresStorage.providedBy(storage):
        return  # only for pg
    table_name = clear_table_name(storage._objects_table_name)
    result = await storage.read_conn.fetch('''
        SELECT * FROM pg_indexes
        WHERE tablename = '{}' AND indexname = '{}_parent_id_id_key';
        '''.format(table_name, table_name))
    if len(result) > 0:
        # check if we need to drop and create new constraint
        if TRASHED_ID not in result[0]['indexdef']:
            await storage.read_conn.execute('''
                ALTER TABLE {} DROP CONSTRAINT {}_parent_id_id_key;
                '''.format(storage._objects_table_name, table_name))
            await storage.read_conn.execute(storage._unique_constraint.format(
                objects_table_name=storage._objects_table_name,
                constraint_name=table_name,
                TRASHED_ID=TRASHED_ID
            ))
async def migrate_contraint(db):
    storage = db.storage
    if not IPostgresStorage.providedBy(storage):
        return  # only for pg
    table_name = clear_table_name(storage._objects_table_name)
    result = await storage.read_conn.fetch('''
        SELECT * FROM pg_indexes
        WHERE tablename = '{}' AND indexname = '{}_parent_id_id_key';
        '''.format(table_name, table_name))
    if len(result) > 0:
        # check if we need to drop and create new constraint
        if TRASHED_ID not in result[0]['indexdef']:
            await storage.read_conn.execute('''
                ALTER TABLE {} DROP CONSTRAINT {}_parent_id_id_key;
                '''.format(storage._objects_table_name, table_name))
            await storage.read_conn.execute(
                storage._unique_constraint.format(
                    objects_table_name=storage._objects_table_name,
                    constraint_name=table_name,
                    TRASHED_ID=TRASHED_ID))
async def initialize(self, loop=None, **kw):
    self._connection_options = kw
    if self._connection_manager is None:
        self._connection_manager = PGConnectionManager(
            dsn=self._dsn,
            pool_size=self._pool_size,
            connection_options=self._connection_options,
            conn_acquire_timeout=self._conn_acquire_timeout)
    await self._connection_manager.initialize(loop, **kw)

    async with self.lock:
        if await self.has_unique_constraint():
            self._supports_unique_constraints = True

        trash_sql = self._sql.get('CREATE_TRASH', self._objects_table_name)
        try:
            await self.initialize_tid_statements()
            await self.read_conn.execute(trash_sql)
        except asyncpg.exceptions.ReadOnlySQLTransactionError:
            # Not necessary for read-only pg
            pass
        except (asyncpg.exceptions.UndefinedTableError,
                asyncpg.exceptions.InvalidSchemaNameError):
            await self.create()
            # only available on new databases
            await self.read_conn.execute(
                self._unique_constraint.format(
                    objects_table_name=self._objects_table_name,
                    constraint_name=clear_table_name(
                        self._objects_table_name)))
            self._supports_unique_constraints = True
            await self.initialize_tid_statements()
            await self.read_conn.execute(trash_sql)

        # migrate to larger VARCHAR size...
        result = await self.read_conn.fetch("""
            select * from information_schema.columns
            where table_name='{}'""".format(self._objects_table_name))
        if len(result) > 0 and result[0][
                'character_maximum_length'] != MAX_OID_LENGTH:
            log.warn('Migrating VARCHAR key length')
            await self.read_conn.execute(f'''
                ALTER TABLE {self._objects_table_name}
                ALTER COLUMN zoid TYPE varchar({MAX_OID_LENGTH})''')
            await self.read_conn.execute(f'''
                ALTER TABLE {self._objects_table_name}
                ALTER COLUMN of TYPE varchar({MAX_OID_LENGTH})''')
            await self.read_conn.execute(f'''
                ALTER TABLE {self._objects_table_name}
                ALTER COLUMN parent_id TYPE varchar({MAX_OID_LENGTH})''')
            await self.read_conn.execute(f'''
                ALTER TABLE {self._blobs_table_name}
                ALTER COLUMN bid TYPE varchar({MAX_OID_LENGTH})''')
            await self.read_conn.execute(f'''
                ALTER TABLE {self._blobs_table_name}
                ALTER COLUMN zoid TYPE varchar({MAX_OID_LENGTH})''')

    self._vacuum = self._vacuum_class(self, loop)
    self._vacuum_task = asyncio.Task(self._vacuum.initialize(), loop=loop)

    def vacuum_done(task):
        if self._vacuum._closed:
            # if it's closed, we know this is expected
            return
        log.warning('Vacuum pg task ended. This should not happen. '
                    'No database vacuuming will be done here anymore.')

    self._vacuum_task.add_done_callback(vacuum_done)
    self._connection_initialized_on = time.time()