def column_upgrade(table_name, column_name, enum_name):
    if op.get_context().bind.dialect.name != 'postgresql':
        if op.get_context().bind.dialect.name == 'mssql':
            op.drop_constraint('ck_ps_endpoints_identify_by_pjsip_identify_by_values',
                               table_name)
        op.alter_column(table_name, column_name, type_=sa.String(80))
        return

    # Postgres requires a few more steps
    op.execute('ALTER TABLE ' + table_name + ' ALTER COLUMN ' + column_name +
               ' TYPE varchar(80) USING identify_by::text::' + enum_name)
    op.execute('DROP TYPE ' + enum_name)
def downgrade():
    # Convert to UUID for PostgreSQL or to CHAR(32) for others
    if op.get_context().dialect.name == 'sqlite':
        pass  # No difference between varchar and char in SQLite
    elif op.get_context().dialect.name == 'postgresql':
        drop_user_id_fkeys()
        for table, col in (
                ("user", "id"), ("sender", "user_id"), ("vote", "user_id")):
            op.alter_column(table, col, type_=sa.Unicode(255),
                            existing_type=types.UUID, existing_nullable=False)
        # Need cascade for data conversion below, it will be removed by the
        # last operation (or the loop on FKEYS_CASCADE if offline).
        create_user_id_fkeys("CASCADE")
    else:
        # Untested on other engines
        for table, col in (
                ("user", "id"), ("sender", "user_id"), ("vote", "user_id")):
            op.alter_column(table, col, type_=sa.Unicode(255),
                            existing_type=types.UUID, existing_nullable=False)

    if not context.is_offline_mode():
        connection = op.get_bind()
        # Create a new MetaData instance here because the data is UUIDs and we
        # want to convert to simple strings
        metadata = sa.MetaData()
        metadata.bind = connection
        User = Base.metadata.tables["user"].tometadata(metadata)
        User = sa.Table("user", metadata,
                        sa.Column("id", sa.Unicode(255), primary_key=True),
                        extend_existing=True)
        transaction = connection.begin()
        for user in User.select().execute():
            try:
                new_user_id = UUID(user.id).int
            except ValueError:
                continue  # Already converted
            User.update().where(
                User.c.id == user.id
            ).values(id=new_user_id).execute()
        transaction.commit()
        if connection.dialect.name != "sqlite":
            drop_user_id_fkeys()
            create_user_id_fkeys(None)

    # Now remove onupdate=CASCADE from some foreign keys
    rebuild_fkeys(None)
def upgrade():
    # Convert existing data into UUID strings
    if not context.is_offline_mode():
        connection = op.get_bind()
        # Create a new MetaData instance here because the data is not proper
        # UUIDs yet so it'll error out.
        metadata = sa.MetaData()
        metadata.bind = connection
        User = Base.metadata.tables["user"].tometadata(metadata)
        User = sa.Table("user", metadata,
                        sa.Column("id", sa.Unicode(255), primary_key=True),
                        extend_existing=True)
        if connection.dialect.name != "sqlite":
            drop_user_id_fkeys()
            create_user_id_fkeys("CASCADE")
        transaction = connection.begin()
        for user in User.select().execute():
            try:
                new_user_id = unicode(UUID(int=int(user.id)))
            except ValueError:
                continue  # Already converted
            User.update().where(
                User.c.id == user.id
            ).values(id=new_user_id).execute()
        transaction.commit()

    # Convert to UUID for PostgreSQL or to CHAR(32) for others
    if op.get_context().dialect.name == 'sqlite':
        pass  # No difference between varchar and char in SQLite
    elif op.get_context().dialect.name == 'postgresql':
        drop_user_id_fkeys()
        for table, col in (
                ("user", "id"), ("sender", "user_id"), ("vote", "user_id")):
            op.execute('''
                ALTER TABLE "{table}" ALTER COLUMN {col} TYPE UUID USING {col}::uuid
            '''.format(table=table, col=col))
        create_user_id_fkeys("CASCADE")
    else:
        # Untested on other engines
        for table, col in (
                ("user", "id"), ("sender", "user_id"), ("vote", "user_id")):
            op.alter_column(table, col, type_=types.UUID,
                            existing_type=sa.Unicode(255),
                            existing_nullable=False)

    # Now add onupdate=CASCADE to some foreign keys.
    rebuild_fkeys("CASCADE")
def downgrade(): """Downgrade database.""" ctx = op.get_context() insp = Inspector.from_engine(ctx.connection.engine) for fk in insp.get_foreign_keys('b2share_block_schema'): if fk['referred_table'] == 'b2share_community': op.drop_constraint( op.f(fk['name']), 'b2share_block_schema', type_='foreignkey' ) op.drop_table('b2share_block_schema_version') for fk in insp.get_foreign_keys('b2share_community_schema_version'): if fk['referred_table'] == 'b2share_community': op.drop_constraint( op.f(fk['name']), 'b2share_community_schema_version', type_='foreignkey' ) op.drop_table('b2share_community_schema_version') op.drop_table('b2share_block_schema') op.drop_table('b2share_root_schema_version')
def downgrade():
    # Was unable to find a way to use op.alter_column() to remove the
    # unique index property.
    if op.get_context().bind.dialect.name == 'mssql':
        op.drop_constraint('uq_queue_members_uniqueid', 'queue_members')
    op.drop_column('queue_members', 'uniqueid')
    op.add_column('queue_members',
                  sa.Column(name='uniqueid', type_=sa.String(80), nullable=False))
def upgrade():
    # NOTE(sheeprine): Hack to let the migrations pass for postgresql
    dialect = op.get_context().dialect.name
    if dialect == 'postgresql':
        constraints = ['uniq_field_threshold', 'uniq_service_threshold']
    else:
        constraints = ['uniq_field_mapping', 'uniq_service_mapping']
    op.create_table(
        'hashmap_thresholds',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('threshold_id', sa.String(length=36), nullable=False),
        sa.Column('level', sa.Numeric(precision=20, scale=8), nullable=True),
        sa.Column('cost', sa.Numeric(precision=20, scale=8), nullable=False),
        sa.Column('map_type', sa.Enum('flat', 'rate', name='enum_map_type'),
                  nullable=False),
        sa.Column('service_id', sa.Integer(), nullable=True),
        sa.Column('field_id', sa.Integer(), nullable=True),
        sa.Column('group_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['field_id'], ['hashmap_fields.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['group_id'], ['hashmap_groups.id'],
                                ondelete='SET NULL'),
        sa.ForeignKeyConstraint(['service_id'], ['hashmap_services.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('threshold_id'),
        sa.UniqueConstraint('level', 'field_id', name=constraints[0]),
        sa.UniqueConstraint('level', 'service_id', name=constraints[1]),
        mysql_charset='utf8',
        mysql_engine='InnoDB')
def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    # Workaround for Alembic bug #89
    # https://bitbucket.org/zzzeek/alembic/issue/89
    context = op.get_context()
    if context.bind.dialect.name == "postgresql":
        op.execute(
            "CREATE TYPE ipv6_ra_modes AS ENUM ('%s', '%s', '%s')"
            % ("slaac", "dhcpv6-stateful", "dhcpv6-stateless")
        )
        op.execute(
            "CREATE TYPE ipv6_address_modes AS ENUM ('%s', '%s', '%s')"
            % ("slaac", "dhcpv6-stateful", "dhcpv6-stateless")
        )
    op.add_column(
        "subnets",
        sa.Column(
            "ipv6_ra_mode",
            sa.Enum("slaac", "dhcpv6-stateful", "dhcpv6-stateless",
                    name="ipv6_ra_modes"),
            nullable=True
        ),
    )
    op.add_column(
        "subnets",
        sa.Column(
            "ipv6_address_mode",
            sa.Enum("slaac", "dhcpv6-stateful", "dhcpv6-stateless",
                    name="ipv6_address_modes"),
            nullable=True,
        ),
    )
def downgrade():
    current_context = op.get_context()
    meta = current_context.opts['target_metadata']
    user = sa.Table('users', meta, autoload=True)
    # remove all records to undo the preseed.
    op.execute(user.delete())
def upgrade():
    ctx = op.get_context()
    con = op.get_bind()
    table_exists = ctx.dialect.has_table(con.engine, 'project_quotas')
    if not table_exists:
        op.create_table(
            'project_quotas',
            sa.Column('id', sa.String(length=36), nullable=False),
            sa.Column('created_at', sa.DateTime(), nullable=False),
            sa.Column('updated_at', sa.DateTime(), nullable=False),
            sa.Column('deleted_at', sa.DateTime(), nullable=True),
            sa.Column('deleted', sa.Boolean(), nullable=False),
            sa.Column('status', sa.String(length=20), nullable=False),
            sa.Column('project_id', sa.String(length=36), nullable=False),
            sa.Column('secrets', sa.Integer(), nullable=True),
            sa.Column('orders', sa.Integer(), nullable=True),
            sa.Column('containers', sa.Integer(), nullable=True),
            sa.Column('transport_keys', sa.Integer(), nullable=True),
            sa.Column('consumers', sa.Integer(), nullable=True),
            sa.ForeignKeyConstraint(['project_id'], ['projects.id'],
                                    name='project_quotas_fk'),
            sa.PrimaryKeyConstraint('id'),
            mysql_engine='InnoDB')
        op.create_index(
            op.f('ix_project_quotas_project_id'),
            'project_quotas',
            ['project_id'],
            unique=False)
def upgrade(): # sqlite doesn't have ALTER command cx = op.get_context() if 'sqlite' in cx.connection.engine.name: with op.batch_alter_table("records") as batch_op: batch_op.add_column(sa.Column('fingerprints', sa.Text)) batch_op.drop_column('meta_data') batch_op.drop_column('orcid_claims') batch_op.drop_column('nonbib_data') batch_op.drop_column('fulltext') batch_op.drop_column('meta_data_updated') batch_op.drop_column('orcid_claims_updated') batch_op.drop_column('nonbib_data_updated') batch_op.drop_column('fulltext_updated') else: op.add_column('records', sa.Column('fingerprints', sa.Text)) op.drop_column('records', 'meta_data') op.drop_column('records', 'orcid_claims') op.drop_column('records', 'nonbib_data') op.drop_column('records', 'fulltext') op.drop_column('records', 'meta_data_updated') op.drop_column('records', 'orcid_claims_updated') op.drop_column('records', 'nonbib_data_updated') op.drop_column('records', 'fulltext_updated')
def downgrade(): cx = op.get_context() if 'sqlite' in cx.connection.engine.name: with op.batch_alter_table("records") as batch_op: batch_op.drop_column('fingerprints') batch_op.add_column(sa.Column('meta_data', sa.Text)) batch_op.add_column(sa.Column('orcid_claims', sa.Text)) batch_op.add_column(sa.Column('nonbib_data', sa.Text)) batch_op.add_column(sa.Column('fulltext', sa.Text)) batch_op.add_column(sa.Column('meta_data_updated', sa.TIMESTAMP)) batch_op.add_column(sa.Column('orcid_claims_updated', sa.TIMESTAMP)) batch_op.add_column(sa.Column('nonbib_data_updated', sa.TIMESTAMP)) batch_op.add_column(sa.Column('fulltext_updated', sa.TIMESTAMP)) else: op.drop_column('records', 'fingerprints') op.add_column('records', sa.Column('meta_data', sa.Text)) op.add_column('records', sa.Column('orcid_claims', sa.Text)) op.add_column('records', sa.Column('nonbib_data', sa.Text)) op.add_column('records', sa.Column('fulltext', sa.Text)) op.add_column('records', sa.Column('meta_data_updated', sa.TIMESTAMP)) op.add_column('records', sa.Column('orcid_claims_updated', sa.TIMESTAMP)) op.add_column('records', sa.Column('nonbib_data_updated', sa.TIMESTAMP)) op.add_column('records', sa.Column('fulltext_updated', sa.TIMESTAMP))
def downgrade():
    context = op.get_context()
    dialect = context.bind.dialect.name

    op.drop_constraint(
        name=PK_NAME,
        table_name=TABLE_NAME,
        type_='primary'
    )
    op.add_column(
        TABLE_NAME,
        sa.Column('id', sa.String(32))
    )

    if dialect == 'ibm_db_sa':
        # DB2 doesn't support nullable column in primary key
        op.alter_column(
            table_name=TABLE_NAME,
            column_name='id',
            nullable=False
        )

    with migration.remove_fks_from_table(TABLE_NAME):
        op.create_primary_key(
            name=PK_NAME,
            table_name=TABLE_NAME,
            cols=['id']
        )
def upgrade():
    # Note: alter type can't be run in a transaction, this needs
    # transaction_per_migration=True in the context, too.
    if not op.get_context().as_sql:
        connection = op.get_bind()
        connection.execution_options(isolation_level='AUTOCOMMIT')
    op.execute(DDL("ALTER TYPE candidate_source ADD VALUE 'manual'"))
    op.execute(DDL("ALTER TYPE candidate_source ADD VALUE 'ale'"))
def upgrade():
    metadata = _get_database_metadata()

    # Get relevant tables
    secrets = metadata.tables['secrets']
    project_secret = metadata.tables['project_secret']

    # Add project_id to the secrets
    op.execute(secrets.update().
               values({'project_id': project_secret.c.project_id}).
               where(secrets.c.id == project_secret.c.secret_id).
               where(secrets.c.project_id == None))

    # Need to drop foreign key constraint before mysql will allow changes
    ctx = op.get_context()
    _drop_constraint(ctx, 'secrets_project_fk', 'secrets')

    # make project_id no longer nullable
    op.alter_column('secrets', 'project_id',
                    type_=sa.String(36), nullable=False)

    # Create foreign key constraint again
    _create_constraint(ctx, 'secrets_project_fk', 'secrets', 'projects',
                       ['project_id'], ['id'])
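# The helpers _drop_constraint() and _create_constraint() used above are defined
# elsewhere in the migration module. A minimal sketch of what they might look like,
# assuming the MySQL-only foreign-key workaround the comments describe (hypothetical,
# not the verbatim originals):
def _drop_constraint(ctx, name, table):
    # MySQL refuses to alter a column that a foreign key still references,
    # so the constraint is dropped only on that dialect.
    if ctx.dialect.name == 'mysql':
        op.drop_constraint(name, table, type_='foreignkey')


def _create_constraint(ctx, name, table, reftable, local_cols, remote_cols):
    # Recreate the foreign key that was dropped above, again only for MySQL.
    if ctx.dialect.name == 'mysql':
        op.create_foreign_key(name, table, reftable, local_cols, remote_cols)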
def upgrade(): cx = op.get_context() with op.batch_alter_table("records") as batch_op: batch_op.add_column(sa.Column('direct_data', sa.Text)) batch_op.add_column(sa.Column('direct_updated', sa.TIMESTAMP)) batch_op.add_column(sa.Column('direct_processed', sa.TIMESTAMP))
def upgrade(): # Workaround for Alemic bug #89 # https://bitbucket.org/zzzeek/alembic/issue/89 context = op.get_context() if context.bind.dialect.name == "postgresql": op.execute( "CREATE TYPE ipv6_ra_modes AS ENUM ('%s', '%s', '%s')" % ("slaac", "dhcpv6-stateful", "dhcpv6-stateless") ) op.execute( "CREATE TYPE ipv6_address_modes AS ENUM ('%s', '%s', '%s')" % ("slaac", "dhcpv6-stateful", "dhcpv6-stateless") ) op.add_column( "subnets", sa.Column( "ipv6_ra_mode", sa.Enum("slaac", "dhcpv6-stateful", "dhcpv6-stateless", name="ipv6_ra_modes"), nullable=True ), ) op.add_column( "subnets", sa.Column( "ipv6_address_mode", sa.Enum("slaac", "dhcpv6-stateful", "dhcpv6-stateless", name="ipv6_address_modes"), nullable=True, ), )
def upgrade():
    # In order to sanitize the data during migration,
    # the current records in the table need to be verified
    # and all the duplicate records which violate the PK
    # constraint need to be removed.
    context = op.get_context()
    if context.bind.dialect.name == 'postgresql':
        op.execute('DELETE FROM %(table)s WHERE id in ('
                   'SELECT %(table)s.id FROM %(table)s LEFT OUTER JOIN '
                   '(SELECT MIN(id) as id, router_id, l3_agent_id '
                   ' FROM %(table)s GROUP BY router_id, l3_agent_id) AS temp '
                   'ON %(table)s.id = temp.id WHERE temp.id is NULL);'
                   % {'table': TABLE_NAME})
    else:
        op.execute('DELETE %(table)s FROM %(table)s LEFT OUTER JOIN '
                   '(SELECT MIN(id) as id, router_id, l3_agent_id '
                   ' FROM %(table)s GROUP BY router_id, l3_agent_id) AS temp '
                   'ON %(table)s.id = temp.id WHERE temp.id is NULL;'
                   % {'table': TABLE_NAME})
    op.drop_column(TABLE_NAME, 'id')
    op.create_primary_key(
        name=PK_NAME,
        table_name=TABLE_NAME,
        cols=['router_id', 'l3_agent_id']
    )
def upgrade():
    op.create_index(op.f('ix_certificate_authority_metadata_ca_id'),
                    'certificate_authority_metadata', ['ca_id'], unique=False)
    op.create_index(op.f('ix_certificate_authority_metadata_key'),
                    'certificate_authority_metadata', ['key'], unique=False)
    op.create_index(op.f('ix_container_consumer_metadata_container_id'),
                    'container_consumer_metadata', ['container_id'], unique=False)
    op.create_index(op.f('ix_container_secret_container_id'),
                    'container_secret', ['container_id'], unique=False)
    op.create_index(op.f('ix_container_secret_secret_id'),
                    'container_secret', ['secret_id'], unique=False)
    op.create_index(op.f('ix_containers_project_id'),
                    'containers', ['project_id'], unique=False)
    op.create_index(op.f('ix_encrypted_data_kek_id'),
                    'encrypted_data', ['kek_id'], unique=False)
    op.create_index(op.f('ix_encrypted_data_secret_id'),
                    'encrypted_data', ['secret_id'], unique=False)
    op.create_index(op.f('ix_kek_data_project_id'),
                    'kek_data', ['project_id'], unique=False)
    op.create_index(op.f('ix_order_barbican_metadata_order_id'),
                    'order_barbican_metadata', ['order_id'], unique=False)
    op.create_index(op.f('ix_order_plugin_metadata_order_id'),
                    'order_plugin_metadata', ['order_id'], unique=False)
    op.create_index(op.f('ix_order_retry_tasks_order_id'),
                    'order_retry_tasks', ['order_id'], unique=False)
    op.create_index(op.f('ix_orders_container_id'),
                    'orders', ['container_id'], unique=False)
    op.create_index(op.f('ix_orders_project_id'),
                    'orders', ['project_id'], unique=False)
    op.create_index(op.f('ix_orders_secret_id'),
                    'orders', ['secret_id'], unique=False)

    ctx = op.get_context()
    _drop_constraint(ctx, 'preferred_certificate_authorities_ibfk_1',
                     'preferred_certificate_authorities')

    op.alter_column('preferred_certificate_authorities', 'ca_id',
                    existing_type=sa.VARCHAR(length=36), nullable=False)

    op.create_foreign_key('preferred_certificate_authorities_fk',
                          'preferred_certificate_authorities',
                          'certificate_authorities', ['ca_id'], ['id'])

    op.create_index(op.f('ix_preferred_certificate_authorities_ca_id'),
                    'preferred_certificate_authorities', ['ca_id'], unique=False)
    op.create_index(op.f('ix_preferred_certificate_authorities_project_id'),
                    'preferred_certificate_authorities', ['project_id'], unique=True)
    op.create_index(op.f('ix_project_certificate_authorities_ca_id'),
                    'project_certificate_authorities', ['ca_id'], unique=False)
    op.create_index(op.f('ix_project_certificate_authorities_project_id'),
                    'project_certificate_authorities', ['project_id'], unique=False)
    op.create_index(op.f('ix_project_secret_project_id'),
                    'project_secret', ['project_id'], unique=False)
    op.create_index(op.f('ix_project_secret_secret_id'),
                    'project_secret', ['secret_id'], unique=False)
    op.create_index(op.f('ix_secret_store_metadata_secret_id'),
                    'secret_store_metadata', ['secret_id'], unique=False)
def upgrade(): """Add 'time_stamp' column to 'node_feature' table and update all dependencies effected by this change. 1) Drop the dependent view 2) Add the new column 3) Update all dependent stored procedures and recreate the view to include the new column """ db_dialect = op.get_context().dialect if 'postgresql' in db_dialect.name: op.execute(textwrap.dedent("DROP VIEW IF EXISTS node_features_view;")) else: print("View 'node_features_view' was not drop in prepare for adding " "new 'time_stamp' column to 'node_feature' table. " "'%s' is not a supported database dialect." % db_dialect.name) return op.add_column('node_feature', sa.Column('time_stamp', sa.DateTime(), nullable=False, server_default=sa.func.now())) if 'postgresql' in db_dialect.name: _postgresql_upgrade_ddl() else: print("Functions were not updated. " "'%s' is not a supported database dialect." % db_dialect.name) return
def downgrade(): cx = op.get_context() if 'sqlite' in cx.connection.engine.name: with op.batch_alter_table("records") as batch_op: batch_op.alter_column('fingerprint', new_column_name='fingerprints') else: op.alter_column('records', 'fingerprint', new_column_name='fingerprints')
def downgrade(): cx = op.get_context() if 'sqlite' in cx.connection.engine.name: with op.batch_alter_table("records") as batch_op: batch_op.drop_column('direct_created') else: op.drop_column('records', 'direct_created')
def upgrade():
    # Workaround for Alembic bug #89
    # https://bitbucket.org/zzzeek/alembic/issue/89
    context = op.get_context()
    if context.bind.dialect.name == 'postgresql':
        op.execute("CREATE TYPE ipv6_ra_modes AS ENUM ('%s', '%s', '%s')"
                   % ('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless'))
        op.execute("CREATE TYPE ipv6_address_modes AS ENUM ('%s', '%s', '%s')"
                   % ('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless'))
    op.add_column('subnets',
                  sa.Column('ipv6_ra_mode',
                            sa.Enum('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless',
                                    name='ipv6_ra_modes'),
                            nullable=True))
    op.add_column('subnets',
                  sa.Column('ipv6_address_mode',
                            sa.Enum('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless',
                                    name='ipv6_address_modes'),
                            nullable=True))
def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_table( "durations", sa.Column("id", sa.Integer(), nullable=False), sa.Column("name", sa.String(length=255), nullable=False), sa.Column("duration", sa.Integer(), nullable=False), sa.Column("inactive", sa.Boolean(), nullable=True), sa.PrimaryKeyConstraint("id"), ) op.add_column("talks", sa.Column("duration_id", sa.Integer(), nullable=True)) op.create_foreign_key(None, "talks", "durations", ["duration_id"], ["id"]) ### end Alembic commands ### op.bulk_insert( durations_table, [ {"name": "30 minutes", "duration": 30, "inactive": False}, {"name": "45 minutes", "duration": 45, "inactive": False}, {"name": "60 minutes", "duration": 60, "inactive": False}, {"name": "1/2 day", "duration": 180, "inactive": False}, {"name": "full day", "duration": 360, "inactive": False}, ], ) op.execute("UPDATE talks AS t SET duration_id = d.id FROM durations AS d WHERE t.duration::text = d.name") op.alter_column("talks", "duration_id", existing_type=sa.INTEGER(), nullable=False) op.drop_column("talks", "duration") context = op.get_context() if context.bind.dialect.name == "postgresql": sql = "DROP TYPE duration" op.execute(sql)
def upgrade():
    if op.get_context().dialect.name == 'postgresql':
        # INFO - G.M - 2018-11-27 - To modify a type in postgresql, we should
        # create a new one, set the column type to this new one and remove the
        # old one.
        op.execute("ALTER TYPE authtype RENAME TO authtype_old;")
        op.execute("ALTER TABLE users alter auth_type drop default;")
        enum = sa.Enum(*new_auth_type_list, name='authtype')
        enum.create(op.get_bind(), checkfirst=False)
        with op.batch_alter_table('users') as batch_op:
            batch_op.alter_column(
                'auth_type',
                type_=enum,
                postgresql_using="auth_type::text::authtype",
                server_default='INTERNAL'
            )
        op.execute("DROP TYPE authtype_old;")
    else:
        # INFO - G.M - 2018-11-27 - MySQL case
        enum = sa.Enum(*new_auth_type_list, name='authtype')
        enum.create(op.get_bind(), checkfirst=False)
        with op.batch_alter_table('users') as batch_op:
            batch_op.alter_column(
                'auth_type',
                type_=enum,
            )
def upgrade(): # sqlite doesn't have ALTER command cx = op.get_context() if 'sqlite' in cx.connection.engine.name: with op.batch_alter_table("records") as batch_op: batch_op.add_column(sa.Column('direct_created', sa.TIMESTAMP)) else: op.add_column('records', sa.Column('direct_created', sa.TIMESTAMP))
def drop_data_samples_view():
    db_dialect = op.get_context().dialect
    if 'postgresql' in db_dialect.name:
        postgres_drop_data_samples_view()
        return True
    else:
        print("Views were not dropped. '%s' is not a supported database "
              "dialect." % db_dialect.name)
        return False
def downgrade():
    db_dialect = op.get_context().dialect
    if 'postgresql' in db_dialect.name:
        _postgresql_downgrade_ddl()
    else:
        print("Function generate_partition_triggers() was not downgraded. "
              "'%s' is not a supported database dialect." % db_dialect.name)
        return
def _drop_view(view):
    dialect = op.get_context().dialect.name
    if dialect in sql_drop_view_clauses and view in sql_drop_view_clauses[dialect]:
        op.execute(sql_drop_view_clauses[dialect][view])
        return True
    else:
        print("Unable to drop view '{str_view}'. '{str_dialect}' is not a "
              "supported database dialect."
              .format(str_view=view, str_dialect=dialect))
        return False
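# `sql_drop_view_clauses` is a module-level dialect -> view -> DDL mapping that the
# helper above looks up. A minimal hypothetical example of its shape (the view name and
# SQL below are illustrative only, not taken from the original module):
sql_drop_view_clauses = {
    'postgresql': {
        'node_features_view': 'DROP VIEW IF EXISTS node_features_view;',
    },
}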
def recreate_views():
    db_dialect = op.get_context().dialect
    if 'postgresql' in db_dialect.name:
        postgres_recreate_views()
        return True
    else:
        print("Views were not re-created. '%s' is not a supported database "
              "dialect." % db_dialect.name)
        return False
def upgrade():
    dialect = op.get_context().dialect
    if dialect.name == 'sqlite':
        upgrade_sqlite()
    elif dialect.name == 'mysql':
        upgrade_mysql()
    elif dialect.name == 'postgresql':
        upgrade_postgresql()
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    connection = None
    if not op.get_context().as_sql:
        connection = op.get_bind()
        connection.execution_options(isolation_level='AUTOCOMMIT')

    op.execute("ALTER TYPE providers ADD value 'google' after 'facebook'")

    if connection is not None:
        connection.execution_options(isolation_level='READ_COMMITTED')
def upgrade():
    conn = op.get_bind()
    meta = sa.MetaData(bind=conn)
    tv_episodes = sa.Table('tv_episodes', meta, autoload=True)

    with op.get_context().begin_transaction():
        for row in conn.execute(tv_episodes.select()):
            conn.execute(
                f'UPDATE tv_episodes SET subtitles_lastsearch = "" WHERE tv_episodes.indexer_id = {row.indexer_id}'
            )

    with op.batch_alter_table("tv_episodes") as batch_op:
        batch_op.alter_column('subtitles_lastsearch', type_=sa.DateTime(timezone=True))

    with op.get_context().begin_transaction():
        for row in conn.execute(tv_episodes.select()):
            conn.execute(
                f'UPDATE tv_episodes SET subtitles_lastsearch = {sa.func.current_timestamp()} WHERE tv_episodes.indexer_id = {row.indexer_id}'
            )
def enum_update(table_name, column_name, enum_name, enum_values):
    if op.get_context().bind.dialect.name != 'postgresql':
        if op.get_context().bind.dialect.name == 'mssql':
            op.drop_constraint('ck_musiconhold_mode_moh_mode_values',
                               'musiconhold')
        op.alter_column(table_name, column_name,
                        type_=sa.Enum(*enum_values, name=enum_name))
        return

    # Postgres requires a few more steps
    tmp = enum_name + '_tmp'

    op.execute('ALTER TYPE ' + enum_name + ' RENAME TO ' + tmp)

    updated = sa.Enum(*enum_values, name=enum_name)
    updated.create(op.get_bind(), checkfirst=False)

    op.execute('ALTER TABLE ' + table_name + ' ALTER COLUMN ' + column_name +
               ' TYPE ' + enum_name + ' USING mode::text::' + enum_name)

    op.execute('DROP TYPE ' + tmp)
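# A hypothetical call to the enum_update() helper above, e.g. from an upgrade() that
# extends the musiconhold mode enum with a new value. The value list is illustrative;
# the real migration defines its own (note the USING clause hardcodes the 'mode'
# column, so the helper only fits that table/column):
def upgrade():
    enum_update('musiconhold', 'mode', 'moh_mode_values',
                ['custom', 'files', 'mp3nb', 'quietmp3nb', 'quietmp3', 'playlist'])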
def upgrade():
    # Add new LCO API values to the followup_apis ENUM type
    with op.get_context().autocommit_block():
        op.execute(
            "ALTER TYPE followup_apis ADD VALUE IF NOT EXISTS 'SINISTROAPI'")
        op.execute(
            "ALTER TYPE followup_apis ADD VALUE IF NOT EXISTS 'SPECTRALAPI'")
        op.execute(
            "ALTER TYPE followup_apis ADD VALUE IF NOT EXISTS 'FLOYDSAPI'")
        op.execute(
            "ALTER TYPE followup_apis ADD VALUE IF NOT EXISTS 'MUSCATAPI'")
def _drop_item(item):
    dialect = op.get_context().dialect.name
    if dialect in sql_drop_item_clauses and item in sql_drop_item_clauses[dialect]:
        op.execute(sql_drop_item_clauses[dialect][item])
        return True
    else:
        err_msg = ("Unable to drop item '{str_item}'. '{str_dialect}' "
                   "is not a supported database dialect.")
        print(err_msg.format(str_item=item, str_dialect=dialect))
        return False
def table_has_column(table, column):
    config = op.get_context().config
    engine = engine_from_config(config.get_section(config.config_ini_section),
                                prefix='sqlalchemy.')
    insp = reflection.Inspector.from_engine(engine)
    has_column = False
    for col in insp.get_columns(table):
        if column not in col['name']:
            continue
        has_column = True
    return has_column
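# A minimal sketch of how a helper like table_has_column() is typically used to keep a
# migration idempotent. The table and column names here are hypothetical:
def upgrade():
    if not table_has_column('instances', 'uuid'):
        op.add_column('instances', sa.Column('uuid', sa.String(36), nullable=True))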
def downgrade(): if op.get_context().dialect.name == 'postgresql': op.execute( "ALTER TYPE climatizationstate RENAME TO climatizationstate_old") op.execute( "CREATE TYPE climatizationstate AS ENUM('UNKNOWN', 'VENTILATION', 'COOLING', 'HEATING', 'OFF')" ) op.execute(( "ALTER TABLE transactions ALTER COLUMN climatizationstate TYPE climatizationstate USING " "climatizationstate::text::climatizationstate")) op.execute("DROP TYPE climatizationstate_old")
def upgrade(): ctx = op.get_context() dialect = ctx.dialect.name if dialect == 'postgresql': op.execute(""" SELECT SETVAL( 'analysis_info_id_seq', (SELECT MAX(id) + 1 FROM analysis_info) ) """)
def _drop_view(view):
    dialect = op.get_context().dialect.name
    if dialect in sql_drop_view_clauses and view in sql_drop_view_clauses[dialect]:
        op.execute(sql_drop_view_clauses[dialect][view])
        return True
    else:
        print("Unable to drop view '{str_view}'. '{str_dialect}' is not a "
              "supported database dialect."
              .format(str_view=view, str_dialect=dialect))
        return False
def upgrade():
    conn = op.get_bind()
    meta = sa.MetaData(bind=conn)
    tv_shows = sa.Table('tv_shows', meta, autoload=True)

    with op.get_context().begin_transaction():
        for row in conn.execute(tv_shows.select()):
            if len(row.lang) == 2:
                lang = babelfish.Language.fromalpha2(row.lang)
                conn.execute(
                    f'UPDATE tv_shows SET lang = "{lang.alpha3}" WHERE tv_shows.series_id = {row.series_id}'
                )
def upgrade():
    context = op.get_context()

    op.add_column('ps_contacts', sa.Column('endpoint', sa.String(40)))

    if context.bind.dialect.name != 'postgresql':
        op.alter_column('ps_contacts', 'expiration_time', type_=sa.BigInteger)
    else:
        op.execute('ALTER TABLE ps_contacts ALTER COLUMN expiration_time '
                   'TYPE BIGINT USING expiration_time::bigint')

    op.create_index('ps_contacts_qualifyfreq_exp', 'ps_contacts',
                    ['qualify_frequency', 'expiration_time'])
    op.create_index('ps_aors_qualifyfreq_contact', 'ps_aors',
                    ['qualify_frequency', 'contact'])
def upgrade():
    # Create the new enum
    ast_bool_values = ENUM(*AST_BOOL_VALUES, name=AST_BOOL_NAME, create_type=False)
    if op.get_context().bind.dialect.name == 'postgresql':
        ast_bool_values.create(op.get_bind(), checkfirst=False)

    # There is no direct way to convert from Integer to ENUM that is
    # not database specific so we transition through a string type.
    op.alter_column('ps_endpoints', 'mwi_subscribe_replaces_unsolicited',
                    type_=sa.String(5))
    op.alter_column('ps_endpoints', 'mwi_subscribe_replaces_unsolicited',
                    type_=ast_bool_values,
                    postgresql_using='mwi_subscribe_replaces_unsolicited::{0}'.format(AST_BOOL_NAME))
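# AST_BOOL_NAME and AST_BOOL_VALUES are module-level constants defined elsewhere in the
# migration script. A sketch of the assumed definitions (an assumption, not verbatim
# from the original file):
AST_BOOL_NAME = 'ast_bool_values'
AST_BOOL_VALUES = ['0', '1', 'off', 'on', 'false', 'true', 'no', 'yes']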
def downgrade(): with op.get_context().autocommit_block(): op.execute(""" ALTER TABLE services DROP CONSTRAINT ck_services_status, ADD CONSTRAINT ck_services_status CHECK(status::text = ANY(ARRAY['disabled', 'enabled', 'published'])); """) op.execute(""" ALTER TABLE archived_services DROP CONSTRAINT ck_archived_services_status, ADD CONSTRAINT ck_archived_services_status CHECK(status::text = ANY(ARRAY['disabled', 'enabled', 'published'])); """)
def downgrade():
    connection = None
    if not op.get_context().as_sql:
        connection = op.get_bind()
        connection.execution_options(isolation_level='AUTOCOMMIT')

    op.execute("ALTER TYPE log_type DROP VALUE 'create_cert'")
    op.execute("ALTER TYPE log_type DROP VALUE 'update_cert'")

    if connection is not None:
        connection.execution_options(isolation_level='READ_COMMITTED')
def upgrade():
    # ALTER TYPE doesn't work from inside a transaction, disable it
    connection = None
    if not op.get_context().as_sql:
        connection = op.get_bind()
        connection.execution_options(isolation_level="AUTOCOMMIT")

    op.execute("ALTER TYPE scrapertype ADD VALUE IF NOT EXISTS 'YOUTUBE'")

    # re-activate the transaction for any future migrations
    if connection is not None:
        connection.execution_options(isolation_level="READ_COMMITTED")
def upgrade():
    # manually entered
    if op.get_context().dialect.name == 'postgresql':
        # https://alembic.sqlalchemy.org/en/latest/api/runtime.html#alembic.runtime.migration.MigrationContext.autocommit_block
        with op.get_context().autocommit_block():
            op.execute("ALTER TYPE taskstatus ADD VALUE 'created'")
    else:
        # sqlite uses varchar + constraint for enum types
        taskstatus_enum = sa.Enum(
            'created',
            'pending',
            'running',
            'success',
            'failed',
            'skipped',
            name='taskstatus',
        )
        with op.batch_alter_table("tasks") as batch_op:
            batch_op.alter_column("status", type_=taskstatus_enum)
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('rooms_slots')
    op.drop_table('presentations')
    op.drop_table('slots')
    op.drop_table('days')
    op.drop_table('rooms')
    ### end Alembic commands ###

    context = op.get_context()
    if context.bind.dialect.name == 'postgresql':
        op.execute('DROP TYPE slotkind')
def upgrade():
    # proxy/table info is no longer in the database
    op.drop_table('proxies')
    op.drop_table('hubs')

    # drop some columns no longer in use
    try:
        op.drop_column('users', 'auth_state')
        # mysql cannot drop _server_id without also dropping
        # implicitly created foreign key
        if op.get_context().dialect.name == 'mysql':
            op.drop_constraint('users_ibfk_1', 'users', type_='foreignkey')
        op.drop_column('users', '_server_id')
    except sa.exc.OperationalError:
        # this won't be a problem moving forward, but downgrade will fail
        if op.get_context().dialect.name == 'sqlite':
            logger.warning("sqlite cannot drop columns. "
                           "Leaving unused old columns in place.")
        else:
            raise

    op.add_column('users', sa.Column('encrypted_auth_state', sa.types.LargeBinary))
def column_downgrade(table_name, column_name, enum_name, enum_values):
    if op.get_context().bind.dialect.name != 'postgresql':
        op.alter_column(table_name, column_name,
                        type_=sa.Enum(*enum_values, name=enum_name))
        return

    # Postgres requires a few more steps
    updated = sa.Enum(*enum_values, name=enum_name)
    updated.create(op.get_bind(), checkfirst=False)

    op.execute('ALTER TABLE ' + table_name + ' ALTER COLUMN ' + column_name +
               ' TYPE ' + enum_name + ' USING identify_by::text::' + enum_name)
def downgrade():
    bind = op.get_context().bind
    inspector = reflection.Inspector.from_engine(bind)
    has_tables = inspector.get_table_names()

    if 'event_logs' in has_tables:
        columns = [x.get('name') for x in inspector.get_columns('event_logs')]
        if 'asset_key' in columns:
            op.drop_column('event_logs', 'asset_key')
            op.drop_index('idx_asset_key', 'event_logs')
            # also drop the index that was missing from the step_key migration
            op.drop_index('idx_step_key', 'event_logs')
def upgrade():
    schema = context.get_context().config.get_main_option('schema')
    staticschema = schema + '_static'
    parentschema = context.get_context().config.get_main_option('parentschema')

    engine = op.get_bind().engine
    if type(engine).__name__ != 'MockConnection' and \
            op.get_context().dialect.has_table(
                engine, 'user', schema=staticschema):  # pragma: no cover
        return

    op.create_table(
        'user',
        Column('type', String(10), nullable=False),
        Column('id', Integer, primary_key=True),
        Column('username', Unicode, unique=True, nullable=False),
        Column('password', Unicode, nullable=False),
        Column('email', Unicode, nullable=False),
        Column('is_password_changed', Boolean, default=False),
        Column('role_name', String),
        schema=staticschema,
    )
    parent_column = ''
    parent_select = ''
    parent_join = ''
    if parentschema is not None and parentschema != '':  # pragma: no cover
        op.add_column('user', Column('parent_role_name', String),
                      schema=staticschema)
        parent_column = ', parent_role_name'
        parent_select = ', pr.name'
        parent_join = (
            'LEFT OUTER JOIN %(parentschema)s.role AS pr ON (pr.id = u.parent_role_id)' % {
                'parentschema': parentschema,
            })

    op.execute(
        'INSERT INTO %(staticschema)s.user '
        '(type, username, password, email, is_password_changed, role_name%(parent_column)s) ('
        'SELECT u.type, u.username, u.password, u.email, '
        'u.is_password_changed, r.name%(parent_select)s '
        'FROM %(schema)s.user AS u '
        'LEFT OUTER JOIN %(schema)s.role AS r ON (r.id = u.role_id) %(parent_join)s'
        ')' % {
            'staticschema': staticschema,
            'schema': schema,
            'parent_select': parent_select,
            'parent_column': parent_column,
            'parent_join': parent_join,
        })
    op.drop_table('user', schema=schema)
def upgrade():
    bind = op.get_context().bind
    inspector = reflection.Inspector.from_engine(bind)
    has_tables = inspector.get_table_names()

    if "event_logs" in has_tables:
        columns = [x.get("name") for x in inspector.get_columns("event_logs")]
        if "asset_key" not in columns:
            op.add_column("event_logs", sa.Column("asset_key", sa.String))
            op.create_index("idx_asset_key", "event_logs", ["asset_key"], unique=False)
            # also add index that was missing from the step_key migration
            op.create_index("idx_step_key", "event_logs", ["step_key"], unique=False)
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_workflow_instances_start_date'), table_name='workflow_instances')
    op.drop_index(op.f('ix_workflow_instances_end_date'), table_name='workflow_instances')
    op.drop_index(op.f('ix_workflow_instances_created_date'), table_name='workflow_instances')
    op.drop_index(op.f('ix_stage_instances_start_date'), table_name='stage_instances')
    op.drop_index(op.f('ix_stage_instances_end_date'), table_name='stage_instances')
    op.drop_index(op.f('ix_stage_instances_created_date'), table_name='stage_instances')
    op.drop_constraint(None, 'qa_tests', type_='foreignkey')
    op.drop_index(op.f('ix_qa_tests_qa_test_type_id'), table_name='qa_tests')
    op.drop_column('qa_tests', 'qa_test_type_id')
    op.drop_index(op.f('ix_qa_test_mappings_feature_id'), table_name='qa_test_mappings')
    op.drop_index(op.f('ix_qa_test_mappings_behavior_id'), table_name='qa_test_mappings')
    op.drop_index(op.f('ix_qa_test_mappings_area_id'), table_name='qa_test_mappings')
    if 'sqlite' != op.get_context().dialect.name:
        op.alter_column('qa_test_mappings', 'feature_id',
                        existing_type=sa.INTEGER(), nullable=False)
    op.drop_constraint(None, 'qa_features', type_='foreignkey')
    op.drop_index(op.f('ix_qa_features_product_id'), table_name='qa_features')
    op.drop_column('qa_features', 'product_id')
    op.drop_constraint(None, 'qa_behavior_points', type_='foreignkey')
    op.drop_index(op.f('ix_qa_behavior_points_product_id'), table_name='qa_behavior_points')
    op.drop_column('qa_behavior_points', 'product_id')
    op.drop_constraint(None, 'qa_areas', type_='foreignkey')
    op.drop_index(op.f('ix_qa_areas_product_id'), table_name='qa_areas')
    op.drop_column('qa_areas', 'product_id')
    op.drop_index(op.f('ix_qa_products_vcs_id'), table_name='qa_products')
    op.drop_index(op.f('ix_qa_products_name'), table_name='qa_products')
    op.drop_index(op.f('ix_qa_products_id'), table_name='qa_products')
    op.drop_table('qa_products')
    op.drop_index(op.f('ix_qa_test_types_id'), table_name='qa_test_types')
    op.drop_table('qa_test_types')
def downgrade(active_plugins=None, options=None):
    context = op.get_context()
    dialect = context.bind.dialect.name

    # Drop the existing foreign key constraints
    # in order to perform primary key changes
    op.drop_constraint(
        name=fk_names[dialect]['l3_agent_id'],
        table_name=TABLE_NAME,
        type_='foreignkey'
    )
    op.drop_constraint(
        name=fk_names[dialect]['router_id'],
        table_name=TABLE_NAME,
        type_='foreignkey'
    )
    op.drop_constraint(
        name=PK_NAME,
        table_name=TABLE_NAME,
        type_='primary'
    )
    op.add_column(
        TABLE_NAME,
        sa.Column('id', sa.String(32))
    )

    # Restore the foreign key constraints
    op.create_foreign_key(
        name=fk_names[dialect]['router_id'],
        source=TABLE_NAME,
        referent='routers',
        local_cols=['router_id'],
        remote_cols=['id'],
        ondelete='CASCADE'
    )
    op.create_foreign_key(
        name=fk_names[dialect]['l3_agent_id'],
        source=TABLE_NAME,
        referent='agents',
        local_cols=['l3_agent_id'],
        remote_cols=['id'],
        ondelete='CASCADE'
    )
    op.create_primary_key(
        name=PK_NAME,
        table_name=TABLE_NAME,
        cols=['id']
    )
def downgrade(): with op.get_context().autocommit_block(): op.execute( "CREATE INDEX CONCURRENTLY IF NOT EXISTS ind_message_otherid ON message (otherid)" ) op.execute( "CREATE INDEX CONCURRENTLY IF NOT EXISTS ind_message_userid ON message (userid)" ) op.execute( "DROP INDEX CONCURRENTLY IF EXISTS ind_message_otherid_noteid") op.execute( "DROP INDEX CONCURRENTLY IF EXISTS ind_message_userid_noteid")