def upgrade(migrate_engine):
    """Add a unique constraint on the user table's name column."""
    meta = MetaData()
    meta.bind = migrate_engine
    users = Table('user', meta, autoload=True)
    UniqueConstraint('name', table=users).create()
def downgrade(migrate_engine):
    """Restore the single-column unique constraint on domains.name."""
    meta.bind = migrate_engine
    domains = Table('domains', meta, autoload=True)
    UniqueConstraint('name', name='name', table=domains).create()
def upgrade(migrate_engine): meta.bind = migrate_engine # Load the TSIG Keys tables tsigkeys_table = Table('tsigkeys', meta, autoload=True) # Create the scope and resource columns scope_col = Column('scope', Enum(name='tsig_scopes', *TSIG_SCOPES), nullable=False, server_default='POOL') scope_col.create(tsigkeys_table) # Start with nullable=True and populate_default=True, then convert # to nullable=False once all rows have been populted with a resource_id resource_id_col = Column('resource_id', UUID, default=default_pool_id, nullable=True) resource_id_col.create(tsigkeys_table, populate_default=True) # Now that we've populated the default pool id in existing rows, MySQL # will let us convert this over to nullable=False tsigkeys_table.c.resource_id.alter(nullable=False) dialect = migrate_engine.url.get_dialect().name if dialect.startswith('sqlite'): # Add missing unique index constraint = UniqueConstraint('name', name='unique_tsigkey_name', table=tsigkeys_table) constraint.create()
def upgrade(migrate_engine):
    """Add a unique (user_id, study_id) constraint to users_studies."""
    meta.bind = migrate_engine
    users_studies = Table("users_studies", meta, autoload=True)
    UniqueConstraint(users_studies.c.user_id,
                     users_studies.c.study_id).create()
def fix_CollectionItem_v0_constraint(db_conn):
    """Add the forgotten Constraint on CollectionItem"""
    # Module-level flag: when this helper has already run once, a second
    # call only resets the flag and does nothing else.
    global collectionitem_unique_constraint_done
    if collectionitem_unique_constraint_done:
        # Reset it. Maybe the whole thing gets run again
        # For a different db?
        collectionitem_unique_constraint_done = False
        return

    metadata = MetaData(bind=db_conn.bind)

    CollectionItem_table = inspect_table(metadata, 'core__collection_items')

    constraint = UniqueConstraint(
        'collection', 'media_entry',
        name='core__collection_items_collection_media_entry_key',
        table=CollectionItem_table)

    try:
        constraint.create()
    except ProgrammingError:
        # User probably has an install that was run since the
        # collection tables were added, so we don't need to run this migration.
        pass

    db_conn.commit()
def upgrade(migrate_engine):
    """Add a unique (event_id, user_id) constraint to notification."""
    metadata.bind = migrate_engine
    notification = Table('notification', metadata, autoload=True)
    UniqueConstraint('event_id', 'user_id', table=notification).create()
def unique_collections_slug(db):
    """Add unique constraint to collection slug.

    Before adding the (creator, slug) unique constraint, any duplicate
    slugs per creator are renamed to a fresh UUID so the constraint can
    be created without integrity errors.
    """
    metadata = MetaData(bind=db.bind)
    collection_table = inspect_table(metadata, "core__collections")
    existing_slugs = {}
    slugs_to_change = []

    for row in db.execute(collection_table.select()):
        # A (creator, slug) pair already seen is a duplicate and must be
        # renamed; otherwise record the slug for that creator.
        # (Fixed idiom: `x not in d` instead of `not x in d`, and a single
        # dict lookup via get/setdefault instead of two.)
        if row.slug in existing_slugs.get(row.creator, ()):
            slugs_to_change.append(row.id)
        else:
            existing_slugs.setdefault(row.creator, []).append(row.slug)

    for row_id in slugs_to_change:
        new_slug = unicode(uuid.uuid4())
        db.execute(collection_table.update().where(
            collection_table.c.id == row_id).values(slug=new_slug))

    # sqlite does not like to change the schema when a transaction(update) is
    # not yet completed
    db.commit()

    constraint = UniqueConstraint('creator', 'slug',
                                  name='core__collection_creator_slug_key',
                                  table=collection_table)
    constraint.create()
    db.commit()
def downgrade(migrate_engine): meta.bind = migrate_engine # Domains Table domains_table = Table("domains", meta, autoload=True) rev_ind = Index("reverse_name_deleted", domains_table.c.reverse_name, domains_table.c.deleted) rev_ind.drop(migrate_engine) # Recordsets Table rsets_table = Table("recordsets", meta, autoload=True) rev_ind = Index("reverse_name_dom_id", rsets_table.c.reverse_name, rsets_table.c.domain_id) rev_ind.drop(migrate_engine) domains_table.c.reverse_name.drop() rsets_table.c.reverse_name.drop() # Recreate constraints for SQLite dialect = migrate_engine.url.get_dialect().name if dialect.startswith("sqlite"): domains_constraint = UniqueConstraint("name", "deleted", name="unique_domain_name", table=domains_table) recordsets_constraint = UniqueConstraint( "domain_id", "name", "type", name="unique_recordset", table=rsets_table ) domains_constraint.create() recordsets_constraint.create()
def downgrade(migrate_engine):
    """Re-create the single-column unique constraint on domains.name."""
    meta.bind = migrate_engine
    domains = Table('domains', meta, autoload=True)
    name_constraint = UniqueConstraint('name', name='name', table=domains)
    name_constraint.create()
def downgrade(migrate_engine):
    """Revert status/action changes on the domains and records tables."""
    meta.bind = migrate_engine

    # Older, smaller status enum (no ERROR).
    RESOURCE_STATUSES = ['ACTIVE', 'PENDING', 'DELETED']

    # Get associated database tables
    domains_table = Table('domains', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    # Downgrade the domains table.
    domains_table.c.status.alter(
        type=Enum(name='resource_statuses', *RESOURCE_STATUSES),
        default='ACTIVE', server_default='ACTIVE')

    domains_table.c.action.drop()

    # Re-add constraint for sqlite.
    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name',
            table=domains_table)

        constraint.create()

    # Downgrade the records table.
    records_table.c.status.alter(
        type=Enum(name='resource_statuses', *RESOURCE_STATUSES),
        default='ACTIVE', server_default='ACTIVE')

    records_table.c.action.drop()
    records_table.c.serial.drop()

    # Re-add constraint for sqlite.
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'hash', name='unique_record', table=records_table)

        constraint.create()
def fix_CollectionItem_v0_constraint(db_conn):
    """Add the forgotten Constraint on CollectionItem"""
    # Module-level flag: after one successful run, a second call only
    # resets the flag (e.g. for a different database) and returns.
    global collectionitem_unique_constraint_done
    if collectionitem_unique_constraint_done:
        # Reset it. Maybe the whole thing gets run again
        # For a different db?
        collectionitem_unique_constraint_done = False
        return

    metadata = MetaData(bind=db_conn.bind)

    CollectionItem_table = inspect_table(metadata, 'core__collection_items')

    constraint = UniqueConstraint('collection', 'media_entry',
        name='core__collection_items_collection_media_entry_key',
        table=CollectionItem_table)

    try:
        constraint.create()
    except ProgrammingError:
        # User probably has an install that was run since the
        # collection tables were added, so we don't need to run this migration.
        pass

    db_conn.commit()
def upgrade(migrate_engine): meta.bind = migrate_engine # Load the TSIG Keys tables tsigkeys_table = Table('tsigkeys', meta, autoload=True) scopes = Enum(name='tsig_scopes', metadata=meta, *TSIG_SCOPES) scopes.create() # Create the scope and resource columns scope_col = Column('scope', scopes, nullable=False, server_default='POOL') scope_col.create(tsigkeys_table) # Start with nullable=True and populate_default=True, then convert # to nullable=False once all rows have been populted with a resource_id resource_id_col = Column('resource_id', UUID, default=default_pool_id, nullable=True) resource_id_col.create(tsigkeys_table, populate_default=True) # Now that we've populated the default pool id in existing rows, MySQL # will let us convert this over to nullable=False tsigkeys_table.c.resource_id.alter(nullable=False) dialect = migrate_engine.url.get_dialect().name if dialect.startswith('sqlite'): # Add missing unique index constraint = UniqueConstraint('name', name='unique_tsigkey_name', table=tsigkeys_table) constraint.create()
def upgrade(migrate_engine):
    """Enforce uniqueness of user.name via a table constraint."""
    md = MetaData()
    md.bind = migrate_engine
    user_tbl = Table('user', md, autoload=True)
    name_unique = UniqueConstraint('name', table=user_tbl)
    name_unique.create()
def upgrade(migrate_engine):
    """Add event.message_id (unique, indexed) and backfill it from traits."""
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    event = sqlalchemy.Table('event', meta, autoload=True)
    message_id = sqlalchemy.Column('message_id', sqlalchemy.String(50))
    event.create_column(message_id)
    cons = UniqueConstraint('message_id', table=event)
    cons.create()
    index = sqlalchemy.Index('idx_event_message_id', models.Event.message_id)
    index.create(bind=migrate_engine)

    # Populate the new column from the traits that used to carry the
    # message_id value (trait rows whose unique_name key is 'message_id').
    trait = sqlalchemy.Table('trait', meta, autoload=True)
    unique_name = sqlalchemy.Table('unique_name', meta, autoload=True)
    join = trait.join(unique_name, unique_name.c.id == trait.c.name_id)
    traits = sqlalchemy.select([trait.c.event_id, trait.c.t_string],
                               whereclause=(unique_name.c.key == 'message_id'),
                               from_obj=join)
    for event_id, value in traits.execute():
        # Copy each trait's string value onto its owning event row.
        (event.update().
         where(event.c.id == event_id).
         values(message_id=value).
         execute())
def downgrade(migrate_engine):
    """Remove secondary-zone support.

    Deletes all SECONDARY zones, drops the type/transferred_at columns,
    the domain_attributes table and the related enum types.
    """
    meta.bind = migrate_engine

    keys = Enum(name='key', metadata=meta, *ZONE_ATTRIBUTE_KEYS)
    types = Enum(name='types', metadata=meta, *ZONE_TYPES)

    domains_attributes_table = Table('domain_attributes', meta, autoload=True)
    domains_table = Table('domains', meta, autoload=True)

    # SECONDARY zones cannot exist without the type machinery; delete them.
    domains = select(columns=[domains_table.c.id, domains_table.c.type])\
        .where(domains_table.c.type == 'SECONDARY')\
        .execute().fetchall()
    for dom in domains:
        # BUG FIX: the original used `domains_table.id`; Table exposes
        # columns via `.c`, so that raised AttributeError instead of
        # deleting the row. Use the column accessor.
        delete = domains_table.delete()\
            .where(domains_table.c.id == dom.id)
        delete.execute()

    # Drop columns before the table / enum types that reference them.
    domains_table.c.type.drop()
    domains_table.c.transferred_at.drop()
    domains_attributes_table.drop()
    keys.drop()
    types.drop()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name',
            table=domains_table)
        # Add missing unique index (SQLite loses it on column drops)
        constraint.create()
def upgrade(migrate_engine):
    """Add a unique slug column to the submissions table."""
    meta = MetaData(bind=migrate_engine)
    submissions = Table('submissions', meta, autoload=True)
    slug_column = Column('slug', UnicodeText)
    slug_column.create(submissions)
    UniqueConstraint(slug_column).create()
def upgrade(migrate_engine):
    """Add soft-delete columns to domains and a (name, deleted) unique index."""
    meta.bind = migrate_engine

    domains_table = Table('domains', meta, autoload=True)

    # Create the new columns
    deleted_column = Column('deleted', CHAR(32), nullable=False, default="0",
                            server_default="0")
    deleted_column.create(domains_table, populate_default=True)

    deleted_at_column = Column('deleted_at', DateTime, nullable=True,
                               default=None)
    deleted_at_column.create(domains_table, populate_default=True)

    # Drop the old single column unique
    # NOTE(kiall): It appears this does nothing. Migration 17 has been added.
    #              leaving this here for reference.
    domains_table.c.name.alter(unique=False)

    # Add a new multi-column unique index
    constraint = UniqueConstraint('name', 'deleted',
                                  name='unique_domain_name',
                                  table=domains_table)
    constraint.create()
def downgrade(migrate_engine): meta.bind = migrate_engine # Domains Table domains_table = Table('domains', meta, autoload=True) rev_ind = Index('reverse_name_deleted', domains_table.c.reverse_name, domains_table.c.deleted) rev_ind.drop(migrate_engine) # Recordsets Table rsets_table = Table('recordsets', meta, autoload=True) rev_ind = Index('reverse_name_dom_id', rsets_table.c.reverse_name, rsets_table.c.domain_id) rev_ind.drop(migrate_engine) domains_table.c.reverse_name.drop() rsets_table.c.reverse_name.drop() # Recreate constraints for SQLite dialect = migrate_engine.url.get_dialect().name if dialect.startswith('sqlite'): domains_constraint = UniqueConstraint('name', 'deleted', name='unique_domain_name', table=domains_table) recordsets_constraint = UniqueConstraint('domain_id', 'name', 'type', name='unique_recordset', table=rsets_table) domains_constraint.create() recordsets_constraint.create()
def upgrade(migrate_engine):
    """Add a pool_id column to domains, backfilled with the default pool."""
    meta.bind = migrate_engine

    domains_table = Table('domains', meta, autoload=True)

    # Get the default pool_id from the config file
    default_pool_id = cfg.CONF['service:central'].default_pool_id

    # Create the pool_id column
    pool_id_column = Column('pool_id', UUID(), default=default_pool_id,
                            nullable=True)
    pool_id_column.create(domains_table, populate_default=True)

    # Alter the table to drop default value after populating it
    domains_table.c.pool_id.alter(default=None)

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index (SQLite drops it during the alter above)
        constraint = UniqueConstraint('name', 'deleted',
                                      name='unique_domain_name',
                                      table=domains_table)
        constraint.create()
def unique_collections_slug(db):
    """Add unique constraint to collection slug.

    Duplicate slugs per creator are first renamed to fresh UUIDs so the
    (creator, slug) unique constraint can be created cleanly.
    """
    metadata = MetaData(bind=db.bind)
    collection_table = inspect_table(metadata, "core__collections")
    existing_slugs = {}
    slugs_to_change = []

    for row in db.execute(collection_table.select()):
        # A (creator, slug) pair already seen is a duplicate and must be
        # renamed; otherwise record the slug for that creator.
        # (Fixed idiom: `x not in d` instead of `not x in d`, and a single
        # dict lookup via get/setdefault instead of two.)
        if row.slug in existing_slugs.get(row.creator, ()):
            slugs_to_change.append(row.id)
        else:
            existing_slugs.setdefault(row.creator, []).append(row.slug)

    for row_id in slugs_to_change:
        new_slug = unicode(uuid.uuid4())
        db.execute(collection_table.update().
                   where(collection_table.c.id == row_id).
                   values(slug=new_slug))

    # sqlite does not like to change the schema when a transaction(update) is
    # not yet completed
    db.commit()

    constraint = UniqueConstraint('creator', 'slug',
                                  name='core__collection_creator_slug_key',
                                  table=collection_table)
    constraint.create()
    db.commit()
def upgrade(migrate_engine):
    """Add a unique (blinding_set_id, name, duplicate_number) constraint."""
    meta.bind = migrate_engine
    blinding_type = Table("blinding_type", meta, autoload=True)
    UniqueConstraint(blinding_type.c.blinding_set_id,
                     blinding_type.c.name,
                     blinding_type.c.duplicate_number).create()
def upgrade(migrate_engine):
    """Add a unique constraint on user.name, muting reflection warnings."""
    # ignore reflection warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=sa_exc.SAWarning)
        meta = MetaData()
        meta.bind = migrate_engine
        users = Table('user', meta, autoload=True)
        UniqueConstraint('name', table=users).create()
def upgrade(migrate_engine):
    """Make user.name unique; reflection warnings are suppressed."""
    # ignore reflection warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=sa_exc.SAWarning)
        md = MetaData()
        md.bind = migrate_engine
        user_tbl = Table('user', md, autoload=True)
        name_unique = UniqueConstraint('name', table=user_tbl)
        name_unique.create()
def upgrade(migrate_engine):
    """Add a unique (pool_id, key, value) constraint to pool_attributes."""
    meta.bind = migrate_engine
    pool_attributes = Table('pool_attributes', meta, autoload=True)
    # Create UniqueConstraint
    UniqueConstraint('pool_id', 'key', 'value',
                     name='unique_pool_attribute',
                     table=pool_attributes).create()
def upgrade(migrate_engine):
    """Extend status enums and add action/serial columns to domains/records."""
    meta.bind = migrate_engine

    # New enum values: ERROR status and an explicit action lifecycle.
    RESOURCE_STATUSES = ['ACTIVE', 'PENDING', 'DELETED', 'ERROR']
    ACTIONS = ['CREATE', 'DELETE', 'UPDATE', 'NONE']

    # Get associated database tables
    domains_table = Table('domains', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    # Upgrade the domains table.
    domains_table.c.status.alter(type=Enum(name='resource_statuses',
                                           *RESOURCE_STATUSES),
                                 default='PENDING', server_default='PENDING')

    action_column = Column('action', Enum(name='actions', *ACTIONS),
                           default='CREATE', server_default='CREATE',
                           nullable=False)
    action_column.create(domains_table)

    # Re-add constraint for sqlite.
    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint('name', 'deleted',
                                      name='unique_domain_name',
                                      table=domains_table)
        constraint.create()

    # Upgrade the records table.
    records_table.c.status.alter(type=Enum(name='resource_statuses',
                                           *RESOURCE_STATUSES),
                                 default='PENDING', server_default='PENDING')

    action_column = Column('action', Enum(name='actions', *ACTIONS),
                           default='CREATE', server_default='CREATE',
                           nullable=False)
    action_column.create(records_table)

    serial_column = Column('serial', Integer(), server_default='1',
                           nullable=False)
    serial_column.create(records_table)

    # Re-add constraint for sqlite.
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint('hash', name='unique_record',
                                      table=records_table)
        constraint.create()
def upgrade(migrate_engine):
    """Re-add the unique hash index on records for SQLite only."""
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        records = Table('records', meta, autoload=True)
        # Add missing unique index
        UniqueConstraint('hash', name='unique_recordset',
                         table=records).create()
def downgrade(migrate_engine):
    """Remove SOA recordsets and managed NS records; shrink the type enum."""
    meta.bind = migrate_engine

    dialect = migrate_engine.url.get_dialect().name

    zones_table = Table('domains', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    # Enum without SOA — matches the schema before this migration.
    RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS',
                    'PTR', 'SSHFP']
    recordsets_table = Table('recordsets', meta, autoload=True)

    # Delete all SOA records (must happen before shrinking the enum)
    recordsets_table.delete().where(recordsets_table.c.type == 'SOA').execute()
    # Remove SOA from the ENUM
    recordsets_table.c.type.alter(type=Enum(name='recordset_types',
                                            *RECORD_TYPES))

    # Remove non-delegated NS records
    # Get all the zones
    zones = select(
        columns=[
            zones_table.c.id,
            zones_table.c.created_at,
            zones_table.c.tenant_id,
            zones_table.c.name,
            zones_table.c.email,
            zones_table.c.serial,
            zones_table.c.refresh,
            zones_table.c.retry,
            zones_table.c.expire,
            zones_table.c.minimum
        ]
    ).execute().fetchall()

    for zone in zones:
        # for each zone, get all non-delegated NS recordsets
        results = recordsets_table.select().\
            where(recordsets_table.c.type == 'NS').\
            where(recordsets_table.c.name == zone.name).execute()
        for r in results:
            records_table.delete().\
                where(records_table.c.recordset_id == r.id).\
                where(records_table.c.managed == 1).execute()
            # NOTE: The value 1 is used instead of True because flake8
            # complains

    # Re-add the constraint for sqlite
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint('domain_id', 'name', 'type',
                                      name='unique_recordset',
                                      table=recordsets_table)
        constraint.create()
def pw_hash_nullable(db):
    """Make pw_hash column nullable"""
    metadata = MetaData(bind=db.bind)
    users = inspect_table(metadata, "core__users")

    users.c.pw_hash.alter(nullable=True)

    # sqlite+sqlalchemy seems to drop this constraint during the
    # migration, so we add it back here for now a bit manually.
    if db.bind.url.drivername == 'sqlite':
        UniqueConstraint('username', table=users).create()

    db.commit()
def downgrade(migrate_engine):
    """Shrink the domains unique constraint back to (name, deleted)."""
    meta.bind = migrate_engine
    domains = Table('domains', meta, autoload=True)

    # Remove the wider pool-aware constraint first...
    UniqueConstraint('name', 'deleted', 'pool_id',
                     name=CONSTRAINT_NAME, table=domains).drop()

    # ...then re-create the original two-column one under the same name.
    UniqueConstraint('name', 'deleted',
                     name=CONSTRAINT_NAME, table=domains).create()
def pw_hash_nullable(db):
    """Make pw_hash column nullable"""
    md = MetaData(bind=db.bind)
    user_tbl = inspect_table(md, "core__users")
    user_tbl.c.pw_hash.alter(nullable=True)
    # sqlite+sqlalchemy seems to drop this constraint during the
    # migration, so we add it back here for now a bit manually.
    if db.bind.url.drivername == 'sqlite':
        username_unique = UniqueConstraint('username', table=user_tbl)
        username_unique.create()
    db.commit()
def upgrade(migrate_engine):
    """Add a unique (service_name, hostname) constraint to service_statuses."""
    meta = MetaData()
    meta.bind = migrate_engine
    statuses = Table('service_statuses', meta, autoload=True)

    # Add UniqueConstraint based on service_name and hostname.
    constraint = UniqueConstraint('service_name', 'hostname',
                                  table=statuses,
                                  name="unique_service_status")
    try:
        constraint.create()
    except exc.IntegrityError as e:
        LOG.error(EXPLANATION, e)
        # Use sys.exit so we don't blow up with a huge trace
        sys.exit(1)
def downgrade(migrate_engine):
    """Remove SOA recordsets and managed NS records; shrink the type enum."""
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    zones_table = Table('domains', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)
    # Enum without SOA — matches the schema before this migration.
    RECORD_TYPES = [
        'A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS', 'PTR',
        'SSHFP'
    ]
    recordsets_table = Table('recordsets', meta, autoload=True)
    # Delete all SOA records (must happen before shrinking the enum)
    recordsets_table.delete().where(recordsets_table.c.type == 'SOA').execute()
    # Remove SOA from the ENUM
    recordsets_table.c.type.alter(
        type=Enum(name='recordset_types', *RECORD_TYPES))
    # Remove non-delegated NS records
    # Get all the zones
    zones = select(columns=[
        zones_table.c.id,
        zones_table.c.created_at,
        zones_table.c.tenant_id,
        zones_table.c.name,
        zones_table.c.email,
        zones_table.c.serial,
        zones_table.c.refresh,
        zones_table.c.retry,
        zones_table.c.expire,
        zones_table.c.minimum
    ]).execute().fetchall()
    for zone in zones:
        # for each zone, get all non-delegated NS recordsets
        results = recordsets_table.select().\
            where(recordsets_table.c.type == 'NS').\
            where(recordsets_table.c.name == zone.name).execute()
        for r in results:
            records_table.delete().\
                where(records_table.c.recordset_id == r.id).\
                where(records_table.c.managed == 1).execute()
            # NOTE: The value 1 is used instead of True because flake8
            # complains
    # Re-add the constraint for sqlite
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint('domain_id', 'name', 'type',
                                      name='unique_recordset',
                                      table=recordsets_table)
        constraint.create()
def downgrade(migrate_engine):
    """Drop the scope and resource_id columns from tsigkeys."""
    meta.bind = migrate_engine

    # Load the TSIG Keys tables
    tsigkeys = Table('tsigkeys', meta, autoload=True)

    # Remove the columns added by the matching upgrade.
    tsigkeys.c.scope.drop()
    tsigkeys.c.resource_id.drop()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        UniqueConstraint('name', name='unique_tsigkey_name',
                         table=tsigkeys).create()
def upgrade(migrate_engine):
    """Ensure service_statuses rows are unique per (service_name, hostname)."""
    md = MetaData()
    md.bind = migrate_engine
    service_statuses = Table('service_statuses', md, autoload=True)

    # Add UniqueConstraint based on service_name and hostname.
    unique_status = UniqueConstraint('service_name', 'hostname',
                                     table=service_statuses,
                                     name="unique_service_status")
    try:
        unique_status.create()
    except exc.IntegrityError as e:
        LOG.error(EXPLANATION, e)
        # Use sys.exit so we don't blow up with a huge trace
        sys.exit(1)
def upgrade(migrate_engine):
    """Add indexed reverse_name columns to domains and recordsets.

    The columns hold the reversed FQDN so suffix searches become fast
    prefix scans on the index.
    """
    meta.bind = migrate_engine

    connection = migrate_engine.connect()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith("sqlite"):
        # SQLite has no reverse() SQL function; register a Python one.
        connection.connection.create_function("reverse", 1,
                                              lambda s: s[::-1])

    # Domains Table
    domains_table = Table("domains", meta, autoload=True)
    # Create nullable first, backfill, then tighten to NOT NULL below.
    reverse_name_col = Column("reverse_name", String(255), nullable=True,
                              server_default="")
    reverse_name_col.create(domains_table)
    update = domains_table.update().values(
        reverse_name=func.reverse(domains_table.c.name))
    connection.execute(update)
    domains_table.c.reverse_name.alter(nullable=False, default=None)
    rev_ind = Index("reverse_name_deleted", domains_table.c.reverse_name,
                    domains_table.c.deleted)
    rev_ind.create(connection)

    # Recordsets Table
    rsets_table = Table("recordsets", meta, autoload=True)
    reverse_name_col = Column("reverse_name", String(255), nullable=True,
                              server_default="")
    reverse_name_col.create(rsets_table)
    update = rsets_table.update().values(
        reverse_name=func.reverse(rsets_table.c.name))
    connection.execute(update)
    rsets_table.c.reverse_name.alter(nullable=False, default=None)
    rev_ind = Index("reverse_name_dom_id", rsets_table.c.reverse_name,
                    rsets_table.c.domain_id)
    rev_ind.create(connection)

    # Recreate constraints for SQLite (lost during the column alters)
    if dialect.startswith("sqlite"):
        domains_constraint = UniqueConstraint("name", "deleted",
                                              name="unique_domain_name",
                                              table=domains_table)
        recordsets_constraint = UniqueConstraint(
            "domain_id", "name", "type", name="unique_recordset",
            table=rsets_table
        )
        domains_constraint.create()
        recordsets_constraint.create()
def downgrade(migrate_engine):
    """Drop pool_id from the domains unique constraint."""
    meta.bind = migrate_engine
    domains_table = Table('domains', meta, autoload=True)

    old_constraint = UniqueConstraint('name', 'deleted', 'pool_id',
                                      name=CONSTRAINT_NAME,
                                      table=domains_table)
    old_constraint.drop()

    new_constraint = UniqueConstraint('name', 'deleted',
                                      name=CONSTRAINT_NAME,
                                      table=domains_table)
    new_constraint.create()
def upgrade(migrate_engine):
    """Create the domain_attributes table and add zone-type columns."""
    meta.bind = migrate_engine

    keys = Enum(name='key', *ZONE_ATTRIBUTE_KEYS)

    domain_attributes_table = Table(
        'domain_attributes', meta,
        Column('id', UUID(), default=utils.generate_uuid, primary_key=True),
        Column('version', Integer(), default=1, nullable=False),
        Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
        Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),

        Column('key', keys),
        Column('value', String(255), nullable=False),
        Column('domain_id', UUID(), nullable=False),

        UniqueConstraint('key', 'value', 'domain_id',
                         name='unique_attributes'),
        ForeignKeyConstraint(['domain_id'], ['domains.id'],
                             ondelete='CASCADE'),

        mysql_engine='INNODB',
        mysql_charset='utf8'
    )

    domains_table = Table('domains', meta, autoload=True)
    # Enum must exist in the database before columns can use it.
    types = Enum(name='types', metadata=meta, *ZONE_TYPES)
    types.create()

    # Add type and transferred_at to domains
    type_ = Column('type', types, default='PRIMARY', server_default='PRIMARY')
    transferred_at = Column('transferred_at', DateTime, default=None)

    type_.create(domains_table, populate_default=True)
    transferred_at.create(domains_table, populate_default=True)

    domain_attributes_table.create()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name',
            table=domains_table)
        # Add missing unique index (SQLite loses it on the column adds)
        constraint.create()
def downgrade(migrate_engine):
    """Drop tsigkeys scope/resource_id columns and the tsig_scopes enum."""
    meta.bind = migrate_engine

    # Load the TSIG Keys tables
    tsigkeys = Table('tsigkeys', meta, autoload=True)
    scopes_enum = Enum(name='tsig_scopes', metadata=meta, *TSIG_SCOPES)

    # Drop the columns first, then the enum type the scope column used.
    tsigkeys.c.scope.drop()
    tsigkeys.c.resource_id.drop()
    scopes_enum.drop()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        UniqueConstraint('name', name='unique_tsigkey_name',
                         table=tsigkeys).create()
def upgrade(migrate_engine):
    """Create the field table with its foreign keys and unique ordering."""
    meta.bind = migrate_engine

    field.create()

    field_type = Table("field_type", meta, autoload=True)
    study = Table("study", meta, autoload=True)

    # Link each field to its type and its study.
    ForeignKeyConstraint([field.c.field_type_id], [field_type.c.id]).create()
    ForeignKeyConstraint([field.c.study_id], [study.c.id]).create()

    # A study cannot have two fields at the same position.
    UniqueConstraint(field.c.study_id, field.c.order, table=field).create()
def upgrade(migrate_engine):
    """Re-add unique indexes that SQLite lost during earlier migrations."""
    meta.bind = migrate_engine

    dialect = migrate_engine.url.get_dialect().name
    if not dialect.startswith('sqlite'):
        return

    domains = Table('domains', meta, autoload=True)
    servers = Table('servers', meta, autoload=True)

    # Add missing multi-column unique index
    UniqueConstraint('name', 'deleted', name='unique_domain_name',
                     table=domains).create()

    # Add a missing unique index
    UniqueConstraint('name', name='unique_server_name',
                     table=servers).create()
def upgrade(migrate_engine):
    """Create the statement table with a unique (type, name) pair."""
    meta.bind = migrate_engine

    statement = Table(
        "statement",
        meta,
        Column("id", Integer, primary_key=True),
        Column("type", NVARCHAR(100), index=True, nullable=False),
        Column("name", NVARCHAR(100), index=True, nullable=False),
        Column("statement", UnicodeText, nullable=False),
        Column("last_updated_datetime", DateTime, nullable=False),
        Column("last_updated_by_user_id", Integer, nullable=False),
    )
    statement.create()

    UniqueConstraint(statement.c.type, statement.c.name,
                     name='ux__statement__type__name').create()
def upgrade(migrate_engine):
    """Create the observation table, unique per (observation_id, name)."""
    meta.bind = migrate_engine

    observation = Table(
        "observation",
        meta,
        Column("id", Integer, primary_key=True),
        Column("observation_id", Integer, index=True, nullable=False),
        Column("uhl_system_number", NVARCHAR(50), index=True, nullable=False),
        Column("observation_datetime", DateTime),
        Column("observation_name", NVARCHAR(100)),
        Column("observation_value", NVARCHAR(50)),
        Column("observation_ews", NVARCHAR(50)),
        Column("observation_units", NVARCHAR(50)),
    )
    observation.create()

    UniqueConstraint(observation.c.observation_id,
                     observation.c.observation_name,
                     name='ix_observation__id__name').create()
def upgrade(migrate_engine):
    """Add a unique (pool_id, hostname) constraint to pool_ns_records,
    unless it already exists (it was backported to an earlier release)."""
    meta.bind = migrate_engine

    pool_ns_records_table = Table('pool_ns_records', meta, autoload=True)

    # Only apply it if it's not there (It's been backported to L)
    constraints = [i.name for i in pool_ns_records_table.constraints]

    if CONSTRAINT_NAME not in constraints:
        # We define the constraint here if not it shows in the list above.
        constraint = UniqueConstraint('pool_id', 'hostname',
                                      name=CONSTRAINT_NAME,
                                      table=pool_ns_records_table)
        try:
            constraint.create()
        except exc.IntegrityError as e:
            # NOTE(review): lowercase `explanation` here, while sibling
            # migrations use `EXPLANATION` — confirm this module actually
            # defines `explanation`, otherwise this raises NameError.
            LOG.error(explanation, e)
            # Use sys.exit so we dont blow up with a huge trace
            sys.exit(1)
def upgrade(migrate_engine):
    """Extend status enums and add action/serial columns to domains/records."""
    meta.bind = migrate_engine

    # New enum values: ERROR status and an explicit action lifecycle.
    RESOURCE_STATUSES = ['ACTIVE', 'PENDING', 'DELETED', 'ERROR']
    ACTIONS = ['CREATE', 'DELETE', 'UPDATE', 'NONE']

    # Get associated database tables
    domains_table = Table('domains', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    # Upgrade the domains table.
    domains_table.c.status.alter(
        type=Enum(name='resource_statuses', *RESOURCE_STATUSES),
        default='PENDING', server_default='PENDING')

    action_column = Column('action', Enum(name='actions', *ACTIONS),
                           default='CREATE', server_default='CREATE',
                           nullable=False)
    action_column.create(domains_table)

    # Re-add constraint for sqlite.
    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name',
            table=domains_table)

        constraint.create()

    # Upgrade the records table.
    records_table.c.status.alter(
        type=Enum(name='resource_statuses', *RESOURCE_STATUSES),
        default='PENDING', server_default='PENDING')

    action_column = Column('action', Enum(name='actions', *ACTIONS),
                           default='CREATE', server_default='CREATE',
                           nullable=False)
    action_column.create(records_table)

    serial_column = Column('serial', Integer(), server_default='1',
                           nullable=False)
    serial_column.create(records_table)

    # Re-add constraint for sqlite.
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'hash', name='unique_record', table=records_table)

        constraint.create()
def upgrade(migrate_engine):
    """Add soft-delete columns to domains and a (name, deleted) unique index."""
    meta.bind = migrate_engine

    domains_table = Table("domains", meta, autoload=True)

    # Create the new columns
    deleted_column = Column("deleted", CHAR(32), nullable=False, default="0",
                            server_default="0")
    deleted_column.create(domains_table, populate_default=True)

    deleted_at_column = Column("deleted_at", DateTime, nullable=True,
                               default=None)
    deleted_at_column.create(domains_table, populate_default=True)

    # Drop the old single column unique
    # NOTE(kiall): It appears this does nothing. Migration 17 has been added.
    #              leaving this here for reference.
    domains_table.c.name.alter(unique=False)

    # Add a new multi-column unique index
    constraint = UniqueConstraint("name", "deleted",
                                  name="unique_domain_name",
                                  table=domains_table)
    constraint.create()
def upgrade(migrate_engine):
    """Restore the unique indexes SQLite dropped in earlier migrations."""
    meta.bind = migrate_engine

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        domains_tbl = Table('domains', meta, autoload=True)
        servers_tbl = Table('servers', meta, autoload=True)

        # Add missing multi-column unique index
        domain_uc = UniqueConstraint('name', 'deleted',
                                     name='unique_domain_name',
                                     table=domains_tbl)
        domain_uc.create()

        # Add a missing unique index
        server_uc = UniqueConstraint('name', name='unique_server_name',
                                     table=servers_tbl)
        server_uc.create()
def upgrade(migrate_engine):
    """Add type/transferred_at columns to domains; create domain_attributes."""
    meta.bind = migrate_engine

    domains_table = Table('domains', meta, autoload=True)

    # Add type and transferred_at to domains
    type_ = Column('type', Enum(name='type', *ZONE_TYPES),
                   default='PRIMARY', server_default='PRIMARY')
    transferred_at = Column('transferred_at', DateTime, default=None)

    type_.create(domains_table, populate_default=True)
    transferred_at.create(domains_table, populate_default=True)

    # NOTE(review): `domain_attributes` is a module-level Table defined
    # outside this function.
    domain_attributes.create()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name',
            table=domains_table)
        # Add missing unique index (SQLite loses it on the column adds)
        constraint.create()
def downgrade(migrate_engine):
    """Re-split the priority back out of records.data into its own column."""
    meta.bind = migrate_engine

    rs_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    recordsets = _get_recordsets(rs_table)

    col = Column('priority', Integer, default=None, nullable=True)
    col.create(records_table)

    record_cols = [
        records_table.c.id,
        records_table.c.priority,
        records_table.c.data]

    for rs in recordsets:
        records = select(columns=record_cols)\
            .where(records_table.c.recordset_id == rs[0])\
            .execute().fetchall()
        for record in records:
            # data currently looks like "<priority> <data>"; split it.
            priority, _, data = record[2].partition(" ")

            # Old style hashes are <rs_id>:<data>:<priority>
            new_hash = _build_hash(rs[0], data, priority)

            update = records_table.update()\
                .where(records_table.c.id == record[0])\
                .values(priority=int(priority), data=data, hash=new_hash)
            update.execute()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index (SQLite loses it on the column add)
        constraint = UniqueConstraint('hash', name='unique_recordset',
                                      table=records_table)
        constraint.create()
def downgrade(migrate_engine):
    """Revert the status enum and drop action/serial columns.

    Shrinks the status enum on both domains and records back to the
    three pre-upgrade values, drops the columns the upgrade added, and
    re-creates the unique constraints on sqlite after each alteration.
    """
    meta.bind = migrate_engine

    resource_statuses = ['ACTIVE', 'PENDING', 'DELETED']

    # Get associated database tables
    domains_table = Table('domains', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    is_sqlite = migrate_engine.url.get_dialect().name.startswith('sqlite')

    # Downgrade the domains table.
    domains_table.c.status.alter(
        type=Enum(name='resource_statuses', *resource_statuses),
        default='ACTIVE', server_default='ACTIVE')
    domains_table.c.action.drop()

    # Re-add constraint for sqlite.
    if is_sqlite:
        UniqueConstraint('name', 'deleted', name='unique_domain_name',
                         table=domains_table).create()

    # Downgrade the records table.
    records_table.c.status.alter(
        type=Enum(name='resource_statuses', *resource_statuses),
        default='ACTIVE', server_default='ACTIVE')
    records_table.c.action.drop()
    records_table.c.serial.drop()

    # Re-add constraint for sqlite.
    if is_sqlite:
        UniqueConstraint('hash', name='unique_record',
                         table=records_table).create()
def upgrade(migrate_engine):
    """Fold the priority column into record data and rehash.

    For every record that has a non-NULL priority, prepend it to the
    data column as "<priority> <data>", rebuild the record's hash from
    the new data, then drop the now-redundant priority column.
    """
    meta.bind = migrate_engine

    rs_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    recordsets = _get_recordsets(rs_table)

    record_cols = [
        records_table.c.id,
        records_table.c.priority,
        records_table.c.data]

    for rs in recordsets:
        # Only records that actually carry a priority need rewriting.
        # isnot(None) is the idiomatic SQLAlchemy spelling of
        # IS NOT NULL (the previous `!= None` needed a noqa suppression).
        query = select(columns=record_cols)\
            .where(records_table.c.recordset_id == rs[0])\
            .where(records_table.c.priority.isnot(None))

        records = query.execute().fetchall()

        for record in records:
            new_data = '%s %s' % (int(record[1]), record[2])

            # New style hashes are <rs_id>:<data> since prio is baked
            # into data
            new_hash = _build_hash(rs[0], new_data)

            update = records_table.update()\
                .where(records_table.c.id == record[0])\
                .values(data=new_data, hash=new_hash)
            migrate_engine.execute(update)

    records_table.c.priority.drop()

    # Add back the unique index that is missing under sqlite.
    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint('hash', name='unique_recordset',
                                      table=records_table)
        constraint.create()
def downgrade(migrate_engine):
    """Remove SOA from the recordset type enum.

    SOA rows cannot survive once the enum shrinks, so they are deleted
    first; on sqlite the unique constraint is then re-created.
    """
    meta.bind = migrate_engine

    record_types = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS',
                    'PTR', 'SSHFP']

    recordsets_table = Table('recordsets', meta, autoload=True)

    # Delete all SOA records
    recordsets_table.delete()\
        .where(recordsets_table.c.type == 'SOA')\
        .execute()

    # Remove SOA from the ENUM
    recordsets_table.c.type.alter(
        type=Enum(name='recordset_types', *record_types))

    # Re-add the constraint for sqlite
    if migrate_engine.url.get_dialect().name.startswith('sqlite'):
        UniqueConstraint('domain_id', 'name', 'type',
                         name='unique_recordset',
                         table=recordsets_table).create()
def downgrade(migrate_engine):
    """Restore the unique constraints on the type tables.

    Constraint names vary by dialect, so they are looked up via
    _get_constraint_names before being re-created.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    c_names = _get_constraint_names(migrate_engine.name)

    instance_types = Table('instance_types', meta, autoload=True)

    # instance_types.name may have several historical constraint names;
    # re-create each of them.
    for constraint_name in c_names['instance_types_name']:
        UniqueConstraint('name', name=constraint_name,
                         table=instance_types).create()

    UniqueConstraint('flavorid',
                     name=c_names['instance_types_flavorid'],
                     table=instance_types).create()

    volume_types = Table('volume_types', meta, autoload=True)
    UniqueConstraint('name', name=c_names['volume_types_name'],
                     table=volume_types).create()