async def upgrade_v1(conn: Connection) -> None:
    try:
        async with conn.transaction():
            await conn.execute("CREATE TYPE threadtype AS ENUM "
                               "('USER', 'GROUP', 'PAGE', 'UNKNOWN')")
    except DuplicateObjectError:
        pass
    is_legacy = await conn.fetchval(legacy_exist_query)
    if is_legacy:
        legacy_version = await conn.fetchval(legacy_version_query)
        if legacy_version != last_legacy_version:
            raise RuntimeError(
                "Legacy database is not on the last version. Please upgrade the old "
                "database with alembic or drop it completely first.")
        already_renamed = await conn.fetchval(legacy_renamed_query)
        if not already_renamed:
            async with conn.transaction():
                await rename_legacy_tables(conn)
        new_created = await conn.fetchval(new_tables_created_query)
        if not new_created:
            async with conn.transaction():
                await create_v1_tables(conn)
        async with conn.transaction():
            await migrate_legacy_data(conn)
    else:
        await create_v1_tables(conn)

async def create_tables(conn: Connection) -> None:
    """Create all the tables and indices required for this app."""
    async with conn.transaction():
        await conn.execute(users.users_creation)
        await conn.execute(users.users_index_creation)
        await conn.execute(todos.todos_creation)
        await conn.execute(todos.todos_index_creation)

async def update(connection: Connection) -> None:
    # Replace all log entries with level NOTSET.
    # Split into two queries because one query would be very complicated.
    # This is unlikely to ever be needed.
    detection_query = """
    SELECT DISTINCT action_id, messages
    FROM public.resourceaction, unnest(messages) arr(msg)
    WHERE msg->'level' = '"NOTSET"';
    """
    update_query = """
    UPDATE public.resourceaction
    SET messages = $1
    WHERE action_id = $2
    """
    async with connection.transaction():
        # Get all bad records
        results = await connection.fetch(detection_query)
        for result in results:
            # Decode, filter and update
            aid = result["action_id"]
            messages = result["messages"]
            for i, message in enumerate(messages):
                message_decoded = json.loads(message)
                if message_decoded["level"] == "NOTSET":
                    message_decoded["level"] = "TRACE"
                messages[i] = json.dumps(message_decoded)
            await connection.execute(update_query, messages, aid)

async def get_actions(conn: Connection, id: Optional[str] = None,
                      experiment_id: Optional[str] = None,
                      limit: int = 500) -> Any:
    """Fetch actions, optionally filtered by id and/or experiment_id.

    :param conn: database connection
    :param id: action id to filter on (default: None)
    :param experiment_id: experiment id to filter on (default: None)
    :param limit: maximum number of rows to return (default: 500)
    """
    _q = [f'SELECT * FROM {Action.__tablename__}']
    if id:
        _q.append('WHERE id = $1')
    if experiment_id:
        _q.append('WHERE experiment_id = $1' if not id
                  else 'AND experiment_id = $2')
    if limit > 0:
        _q.append(f'LIMIT {limit}')
    query = ' '.join(_q)
    q_args = [arg for arg in [id, experiment_id] if arg]
    async with conn.transaction():
        try:
            return await conn.fetch(query, *q_args)
        except asyncpg.exceptions.PostgresError as exc:
            logging.exception('Store error')
            raise StoreException from exc

async def query_subtract(connection: asyncpg.Connection, uuid: str,
                         how_much: int) -> Optional[asyncpg.Record]:
    """Place a hold for the given amount on a client's account.

    :param connection: database connection
    :param uuid: client identifier
    :param how_much: amount in kopecks to withhold from the client's balance
    :raises NotEnoughMoneyError: if the client's account has insufficient funds
    """
    async with connection.transaction():
        row: Optional[asyncpg.Record] = await connection.fetchrow(
            """
            UPDATE client
            SET hold = hold + GREATEST(0, $2)
            WHERE id = $1 AND is_open = TRUE
            RETURNING *
            """,
            uuid,
            how_much,
        )
        # Raising inside the transaction block rolls the hold back.
        if row is not None and row["balance"] - row["hold"] < 0:
            raise NotEnoughMoneyError
        return row

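# Hedged usage sketch (not part of the source): how a caller might drive
# query_subtract from a connection pool and handle the insufficient-funds
# case. The pool argument and the charge_client name are assumptions made
# purely for illustration.
async def charge_client(pool: asyncpg.Pool, client_id: str,
                        kopecks: int) -> bool:
    async with pool.acquire() as connection:
        try:
            row = await query_subtract(connection, client_id, kopecks)
        except NotEnoughMoneyError:
            # query_subtract's transaction already rolled the hold back.
            return False
        # row is None when no open account matched the given id.
        return row is not None
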
async def list(
        connection: asyncpg.Connection,
        table_schema: str = constants.MIGRATIONS_SCHEMA,
        table_name: str = constants.MIGRATIONS_TABLE,
) -> model.MigrationHistory:
    logger.debug('Getting a history of migrations')
    history = model.MigrationHistory()
    await connection.reload_schema_state()
    async with connection.transaction():
        async for record in connection.cursor("""
                select revision, label, timestamp, direction
                from {table_schema}.{table_name}
                order by timestamp asc;
                """.format(
                    table_schema=table_schema,
                    table_name=table_name,
                )):
            history.append(
                model.MigrationHistoryEntry(
                    revision=model.Revision(record['revision']),
                    label=record['label'],
                    timestamp=record['timestamp'],
                    direction=model.MigrationDir(record['direction']),
                ),
            )
    return history

async def update(connection: Connection) -> None:
    """
    Add the last_non_deploying_status column to the public.resource table, which
    represents the last stable state of a given resource. A stable state is any
    state other than the deploying state.
    """
    schema_updates = """
    CREATE TYPE non_deploying_resource_state AS ENUM('unavailable', 'skipped', 'dry', 'deployed', 'failed', 'available',
                                                     'cancelled', 'undefined', 'skipped_for_undefined', 'processing_events');
    ALTER TABLE public.resource ADD COLUMN last_non_deploying_status non_deploying_resource_state NOT NULL DEFAULT 'available';

    -- Change the default value of the `last_non_deploying_status` column to the correct value.
    WITH table_last_non_deploying_status AS (
        SELECT DISTINCT ON (r.environment, r.resource_version_id) r.environment, r.resource_version_id, ra.status
        FROM resource AS r
            INNER JOIN resourceaction AS ra
                ON r.environment = ra.environment
                   AND ra.resource_version_ids::varchar[] @> ARRAY[r.resource_version_id]::varchar[]
        WHERE ra.status IS NOT NULL AND ra.status != 'deploying'
        ORDER BY r.environment, r.resource_version_id, ra.started DESC
    )
    UPDATE resource AS r
    SET last_non_deploying_status = s.status::text::non_deploying_resource_state
    FROM table_last_non_deploying_status AS s
    WHERE r.environment = s.environment AND r.resource_version_id = s.resource_version_id
    """
    async with connection.transaction():
        await connection.execute(schema_updates)

async def delete_tables(conn: Connection) -> None:
    """Delete all the tables/indices used in this app."""
    async with conn.transaction():
        await conn.execute(users.users_index_deletion)
        await conn.execute(todos.todos_index_deletion)
        await conn.execute(users.users_deletion)
        await conn.execute(todos.todos_deletion)

async def update(connection: Connection) -> None:
    schema = """
    ALTER TABLE public.environment ADD COLUMN last_version integer DEFAULT 0;
    UPDATE public.environment AS e
    SET last_version = (SELECT COALESCE(
        (SELECT MAX(version) FROM public.configurationmodel AS c WHERE c.environment = e.id),
        0
    ));

    ALTER TABLE public.resource ADD COLUMN resource_type varchar;
    UPDATE public.resource SET resource_type = substring(resource_id from '(.*)\\[');
    -- Set NOT NULL constraint after column is populated
    ALTER TABLE public.resource ALTER COLUMN resource_type SET NOT NULL;
    CREATE INDEX resource_environment_resource_type_index ON public.resource (environment, resource_type);
    """
    async with connection.transaction():
        await connection.execute(schema)

async def insert_trades(conn: asyncpg.Connection, trades: List[Trade],
                        stock_id: int):
    async with conn.transaction():
        await conn.executemany(
            '''insert into trade(stock, insider, relation, last_date,
                                 transaction, owner_type, shares_traded,
                                 last_price, shares_held)
               values ($1, $2, $3, $4, $5, $6, $7, $8, $9)''',
            [(stock_id, *trade) for trade in trades])

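# Sketch under an assumption: Trade is presumably an iterable record such as
# a NamedTuple whose field order matches the column list above, so that
# (stock_id, *trade) lines up with placeholders $1..$9. This definition is
# illustrative only; the original Trade type is not shown in this section.
from typing import NamedTuple

class Trade(NamedTuple):
    insider: str
    relation: str
    last_date: str
    transaction: str
    owner_type: str
    shares_traded: int
    last_price: float
    shares_held: int
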
async def update(connection: Connection) -> None:
    schema = """
    ALTER TABLE public.environment
        ADD COLUMN description varchar(255) DEFAULT '',
        ADD COLUMN icon varchar(65535) DEFAULT '';
    """
    async with connection.transaction():
        await connection.execute(schema)

async def init_migrations_table(conn: asyncpg.Connection) -> None:
    """Create the single-row migration_version table and insert its one row."""
    async with conn.transaction():
        await conn.execute(
            "create table if not exists migration_version ("
            " the boolean not null primary key default true,"
            " constraint the check (the),"
            " id varchar default null);"
            "insert into migration_version default values on conflict do nothing;"
        )

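# Companion sketch (hypothetical helpers, not in the original module):
# reading and advancing the version stored in the migration_version table
# created above. The CHECK constraint on the "the" column guarantees the
# table can never hold more than one row.
async def get_migration_version(conn: asyncpg.Connection) -> Optional[str]:
    return await conn.fetchval("select id from migration_version")

async def set_migration_version(conn: asyncpg.Connection, version: str) -> None:
    await conn.execute("update migration_version set id = $1", version)
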
async def get_all_users(conn: Connection) -> DataStatus:
    """Return all the registered users for this app."""
    sql = "select email_address from public.users;"
    async with conn.transaction():
        results = await conn.fetch(sql)
    if len(results) == 0:
        return DataStatus([], "No registered users found", False)
    return DataStatus(results, "", True)

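# Assumption for context: DataStatus is not defined in this section; it is
# presumably a small result container along these lines (illustrative only).
from typing import List, NamedTuple

class DataStatus(NamedTuple):
    data: List[Record]
    message: str
    success: bool
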
async def create_tables(con: asyncpg.Connection, metadata: MetaData) -> None:
    """Create tables for a metadata instance."""
    extant_tables = list(
        map(
            lambda r: r['table_name'],
            await con.fetch("SELECT table_name FROM information_schema.tables "
                            "WHERE table_schema='public'")))
    async with con.transaction():
        for table in metadata.sorted_tables:
            if table.name not in extant_tables:
                await con.execute(str(CreateTable(table)))

async def insert_stock_data(conn: asyncpg.Connection, stock: str,
                            prices: List[Price]) -> int:
    async with conn.transaction():
        await conn.execute("insert into stock(name) values ($1)", stock)
        stock_id = await conn.fetchval("select id from stock where name=$1",
                                       stock)
        await conn.executemany(
            "insert into price values ($1, $2, $3, $4, $5, $6, $7)",
            [(stock_id, *price) for price in prices])
        return stock_id

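# Design-note sketch (an alternative, not the original code): assuming
# stock.id is a generated key, INSERT ... RETURNING collapses the separate
# INSERT and SELECT above into a single round trip.
async def insert_stock_data_returning(conn: asyncpg.Connection, stock: str,
                                      prices: List[Price]) -> int:
    async with conn.transaction():
        stock_id = await conn.fetchval(
            "insert into stock(name) values ($1) returning id", stock)
        await conn.executemany(
            "insert into price values ($1, $2, $3, $4, $5, $6, $7)",
            [(stock_id, *price) for price in prices])
        return stock_id
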
async def query_unhold_all(connection: asyncpg.Connection) -> None:
    """Apply every client's hold to their balance and reset the hold to zero.

    :param connection: database connection
    """
    async with connection.transaction():
        await connection.execute("""
            UPDATE client
            SET balance = balance - hold, hold = 0
        """)

async def enforce_unique_agent_instances(connection: Connection) -> None:
    """
    Deletes duplicate AgentInstance records and adds a uniqueness constraint.
    """
    async with connection.transaction():
        await connection.execute("""
            DELETE FROM public.agentinstance a
            USING public.agentinstance b
            WHERE a.id < b.id
                AND a.tid = b.tid
                AND a.process = b.process
                AND a.name = b.name;
            ALTER TABLE public.agentinstance
            ADD CONSTRAINT agentinstance_unique UNIQUE (tid, process, name);
        """)

async def create_tables(connection: asyncpg.Connection, lock: asyncio.Lock):
    create_tables_sql = (
        """sessions (
            session_id varchar(255),
            dc_id integer,
            server_address text,
            port integer,
            auth_key bytea,
            takeout_id integer,
            primary key(session_id, dc_id)
        )""",
        """entities (
            session_id varchar(255),
            id bigint,
            hash bigint not null,
            username text,
            phone bigint default null,
            name text,
            primary key(session_id, id)
        )""",
        """sent_files (
            session_id varchar(255),
            md5_digest bytea,
            file_size integer,
            type integer,
            id bigint,
            hash bigint,
            primary key(session_id, md5_digest, file_size, type)
        )""",
        """update_state (
            session_id varchar(255),
            id integer,
            pts integer,
            qts integer,
            date integer,
            seq integer,
            primary key(session_id, id)
        )""")
    async with lock:
        logger.debug(f"Creating schema `asyncpg_telethon` and tables: {TABLES}")
        async with connection.transaction(isolation="read_committed"):
            await connection.execute(
                """create schema if not exists "asyncpg_telethon";""")
            # The table definitions are static strings defined above, so it is
            # safe to interpolate them into the query with an f-string.
            await connection.execute("".join(
                f"""create table if not exists "asyncpg_telethon".{table};"""
                for table in create_tables_sql
            ))
        logger.debug("Tables created")

async def __get_geolocation(self, connection: Connection,
                            geodict: Dict[str, Any]) -> Record:
    """
    Inserts the geolocation record if it does not exist, then returns it.
    The exception handling is needed to avoid a race condition between
    concurrent inserts.

    :param connection: DB connection
    :param geodict: dictionary with the geolocation's attributes
    :return: the geolocation record
    """
    try:
        async with connection.transaction():
            return await self.__create_geolocation(connection, geodict)
    except UniqueViolationError:
        return await self.__find_geolocation(connection, geodict)

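# Standalone sketch of the same insert-or-find pattern against a
# hypothetical "example" table with a unique "name" column. When already
# inside a transaction, asyncpg turns the inner transaction() into a
# savepoint, so the UniqueViolationError does not poison the outer one.
from asyncpg.exceptions import UniqueViolationError

async def get_or_create_by_name(connection: Connection, name: str) -> Record:
    try:
        async with connection.transaction():
            return await connection.fetchrow(
                "INSERT INTO example(name) VALUES ($1) RETURNING *", name)
    except UniqueViolationError:
        return await connection.fetchrow(
            "SELECT * FROM example WHERE name = $1", name)
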
async def insert_user(conn: Connection, email_address: str) -> DataStatus:
    """Insert a new user given an email_address."""
    sql = ("INSERT INTO users(created_timestamp, modified_timestamp, email_address) "
           "VALUES ($1, $2, $3);")
    now = tzware_datetime()
    try:
        async with conn.transaction():
            result = await conn.execute(sql, now, now, email_address)
        if result == "INSERT 0 1":
            return DataStatus([], "Successfully registered the user", True)
        return DataStatus([], "Something went wrong", False)
    except Exception as ex:
        return DataStatus([], str(ex), False)

async def save_action(conn: Connection, action: Dict) -> Any:
    """Insert an action record, ignoring duplicates; return its id.

    :param conn: database connection
    :param action: action attributes
    """
    async with conn.transaction():
        try:
            await conn.execute(
                f'INSERT INTO {Action.__tablename__} '
                '(id, experiment_id, variant_id, reward, context) '
                'VALUES ($1, $2, $3, $4, $5) ON CONFLICT (id) DO NOTHING',
                action['id'],
                action['experiment_id'],
                action['variant_id'],
                int(action['reward']),
                json.dumps(action['context'])
            )
            return action['id']
        except (asyncpg.exceptions.PostgresError,
                asyncpg.exceptions.DataError) as exc:
            logging.exception('Store error')
            raise StoreException from exc

async def update(connection: Connection) -> None:
    schema = """
    ALTER TABLE public.resource ADD COLUMN resource_id_value varchar;
    UPDATE public.resource SET resource_id_value = substring(resource_id from '\\[(?:.*),(?:.*)=(.*)\\]');
    -- Set NOT NULL constraint after column is populated
    ALTER TABLE public.resource ALTER COLUMN resource_id_value SET NOT NULL;
    CREATE INDEX resource_environment_resource_id_value_index ON public.resource (environment, resource_id_value);
    """
    async with connection.transaction():
        await connection.execute(schema)

async def store(self, connection: Connection):
    """
    Stores the bidding object in the database.

    :param connection: connection to the database
    :return:
    """
    async with connection.transaction():
        await connection.execute(BiddingObject.sql_hdr_insert,
                                 self.get_parent_key(), self.get_key(),
                                 self.get_session(), self.get_type().value,
                                 self.get_state().value,
                                 self.get_resource_request_key(),
                                 self.get_process_request_key())

        # Insert the elements within the bidding object
        for element_name in self.elements:
            await connection.execute(BiddingObject.sql_element_insert,
                                     self.get_parent_key(), self.get_key(),
                                     element_name)
            for field_name in self.elements[element_name]:
                config_param: ConfigParam = self.elements[element_name][field_name]
                await connection.execute(
                    BiddingObject.sql_element_field_insert,
                    self.get_parent_key(), self.get_key(), element_name,
                    field_name, config_param.get_type().value,
                    config_param.get_value())

        # Insert the options within the bidding object
        for option_name in self.options:
            await connection.execute(BiddingObject.sql_option_insert,
                                     self.get_parent_key(), self.get_key(),
                                     option_name)
            for field_name in self.options[option_name]:
                config_param: ConfigParam = self.options[option_name][field_name]
                await connection.execute(
                    BiddingObject.sql_option_field_insert,
                    self.get_parent_key(), self.get_key(), option_name,
                    field_name, config_param.get_type().value,
                    config_param.get_value())

async def update_ordinal_sets(conn: asyncpg.Connection,
                              set_expert: PostgresDBExpert):
    watcher = OrdinalSetWatcher()
    rset = await conn.fetch("""
        SELECT DISTINCT ON (ordinal) card_index_v1.id, ordinal, member_group, member, subtype
        FROM history_v5__card_ids
        INNER JOIN history_v5 USING (id, serverid)
        INNER JOIN card_index_v1 ON (card_id = card_index_v1.id)
        WHERE (subtype = 2 OR subtype = 3) AND rarity = 30 AND serverid = 'jp'
            AND history_v5__card_ids.what > 1
        ORDER BY ordinal, sort_date
    """)
    for cid, ordinal, member_group, member, subtype in rset:
        watcher.observe(cid, ordinal, subtype, member, member_group)
    async with conn.transaction():
        for s in watcher.generate_sets():
            await set_expert.add_object(conn, s)

async def import_users_from_legacy_AD(conn: Connection, ext_identifier_sys_id,
                                      users: List[User]):
    for user in users:
        async with conn.transaction():
            name = user.GivenName.strip().split(" ")
            middle = name[1] if len(name) > 1 else None
            row = await conn.fetchrow(
                "SELECT * FROM odm2.affiliations WHERE primaryemail=$1",
                user.UserPrincipalName,
            )
            people = PeopleCreate(
                personfirstname=name[0],
                personmiddlename=middle,
                personlastname=user.Surname,
            )
            if not row:
                stored_person = await insert_pydantic_object(
                    conn, "odm2.people", people, People)
                await create_affiliations(conn, stored_person, user)
                await create_external_identifier(conn, stored_person,
                                                 ext_identifier_sys_id, user)
            else:
                logging.info("User already exists in db", extra={"user": user})
                ad_reference = await conn.fetchrow(
                    "SELECT * FROM odm2.personexternalidentifiers "
                    "WHERE personexternalidentifier=$1",
                    user.SamAccountName,
                )
                if not ad_reference:
                    person = People(**{**row, **people.dict()})
                    logging.info(
                        "Storing SamAccountName for existing user without reference",
                        extra={
                            "person": person,
                            "sam_account_name": user.SamAccountName,
                        },
                    )
                    await create_external_identifier(conn, person,
                                                     ext_identifier_sys_id, user)

async def rename_legacy_tables(conn: Connection) -> None:
    await conn.execute("ALTER TABLE mx_user_profile RENAME TO legacy_mx_user_profile")
    await conn.execute("ALTER TABLE mx_room_state RENAME TO legacy_mx_room_state")
    try:
        async with conn.transaction():
            await conn.execute("ALTER TYPE membership RENAME TO legacy_membership")
    except UndefinedObjectError:
        pass
    await conn.execute("ALTER TABLE message RENAME TO legacy_message")
    await conn.execute("ALTER TABLE portal RENAME TO legacy_portal")
    await conn.execute("ALTER TABLE puppet RENAME TO legacy_puppet")
    await conn.execute("ALTER TABLE reaction RENAME TO legacy_reaction")
    await conn.execute('ALTER TABLE "user" RENAME TO legacy_user')
    await conn.execute("ALTER TABLE user_portal RENAME TO legacy_user_portal")
    await conn.execute("ALTER TABLE contact RENAME TO legacy_contact")

async def update(connection: Connection) -> None:
    schema = """
    ALTER TABLE public.compile
        ADD COLUMN requested timestamp,
        ADD COLUMN metadata JSONB,
        ADD COLUMN environment_variables JSONB,
        ADD COLUMN do_export boolean,
        ADD COLUMN force_update boolean,
        ADD COLUMN success boolean,
        ADD COLUMN version integer,
        ADD COLUMN remote_id uuid,
        ADD COLUMN handled boolean;
    ALTER TABLE public.report ALTER COLUMN completed DROP NOT NULL;
    CREATE INDEX compile_env_requested_index ON compile (environment, requested ASC);
    CREATE INDEX compile_env_remote_id_index ON compile (environment, remote_id);
    """
    async with connection.transaction():
        await connection.execute(schema)

async def test_unique_agent_instances(migrate_v6_to_v7: None,
                                      postgresql_client: Connection) -> None:
    # Assert that existing documents have been merged and the expired state
    # has been set correctly.
    async with postgresql_client.transaction():
        records: Cursor = postgresql_client.cursor("""
            SELECT COUNT(*) FROM public.agentinstance
            GROUP BY tid, process, name;
        """)
        assert all([record["count"] == 1 async for record in records])
    # Assert the unique constraint is present.
    constraints = await postgresql_client.fetch("""
        SELECT pg_catalog.pg_get_constraintdef(r.oid, true) AS condef
        FROM pg_catalog.pg_constraint r
        WHERE conname='agentinstance_unique'
    """)
    assert len(constraints) == 1
    assert constraints[0]["condef"] == "UNIQUE (tid, process, name)"

async def update(connection: Connection) -> None:
    schema = """
    CREATE TYPE notificationseverity AS ENUM('message', 'info', 'success', 'warning', 'error');

    -- Table: public.notification
    CREATE TABLE IF NOT EXISTS public.notification (
        id uuid NOT NULL,
        environment uuid NOT NULL REFERENCES environment(id) ON DELETE CASCADE,
        created TIMESTAMP WITH TIME ZONE NOT NULL,
        title varchar NOT NULL,
        message varchar NOT NULL,
        severity notificationseverity DEFAULT 'message',
        uri varchar NOT NULL,
        read boolean NOT NULL DEFAULT FALSE,
        cleared boolean NOT NULL DEFAULT FALSE,
        PRIMARY KEY(environment, id)
    );
    CREATE INDEX IF NOT EXISTS notification_env_created_id_index ON notification (environment, created DESC, id);
    """
    async with connection.transaction():
        await connection.execute(schema)

async def migrate(db: asyncpg.Connection = None):
    migrations = get_available()
    try:
        current = await db.fetchval("SELECT migration FROM jobs.migrations")
    except asyncpg.exceptions.UndefinedTableError:
        current = 0
    logger.info("Current migration %s", current)
    applied = current
    async with db.transaction():
        for avail in sorted(migrations.keys()):
            if avail > current:
                logger.info("Applying migration %s", migrations[avail])
                data = load_migration(migrations[avail])
                await db.execute(data)
                applied = avail
        if applied != current:
            logger.info("Updating migrations history version: %s", applied)
            await db.execute("update jobs.migrations set migration=$1", applied)
        else:
            logger.info("No migrations applied. Your db is at the latest version")

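# Hedged usage sketch: wiring migrate() to a live connection. The DSN and
# the entry point are assumptions made purely for illustration.
import asyncio
import asyncpg

async def main() -> None:
    db = await asyncpg.connect("postgresql://localhost/jobs")
    try:
        await migrate(db)
    finally:
        await db.close()

if __name__ == "__main__":
    asyncio.run(main())
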