def upgrade(migrate_engine):
    """Add a 'slug' column to the submissions table and make it unique."""
    meta = MetaData(bind=migrate_engine)
    submissions = Table('submissions', meta, autoload=True)
    slugc = Column('slug', UnicodeText)
    slugc.create(submissions)
    # Presumably Column.create() binds the column to the table, so the
    # constraint can be built from the column alone -- TODO confirm against
    # the sqlalchemy-migrate changeset API.
    slugu = UniqueConstraint(slugc)
    slugu.create()
def downgrade(migrate_engine):
    """Re-create the single-column unique constraint on domains.name."""
    # NOTE(review): relies on a module-level ``meta`` defined outside this view.
    meta.bind = migrate_engine

    domains_table = Table('domains', meta, autoload=True)

    constraint = UniqueConstraint('name', name='name', table=domains_table)
    constraint.create()
Example #3
0
def upgrade(migrate_engine):
    """Enforce one users_studies row per (user_id, study_id) pair."""
    # NOTE(review): relies on a module-level ``meta`` defined outside this view.
    meta.bind = migrate_engine

    t = Table("users_studies", meta, autoload=True)

    cons = UniqueConstraint(t.c.user_id, t.c.study_id)
    cons.create()
Example #4
0
def upgrade(migrate_engine):
    """Add soft-delete columns to domains and a (name, deleted) unique index.

    'deleted' holds "0" for live rows (existing rows are back-filled via
    populate_default), so the multi-column unique constraint lets a deleted
    domain's name be reused.
    """
    meta.bind = migrate_engine

    domains_table = Table('domains', meta, autoload=True)

    # Create the new columns
    deleted_column = Column('deleted',
                            CHAR(32),
                            nullable=False,
                            default="0",
                            server_default="0")
    deleted_column.create(domains_table, populate_default=True)

    deleted_at_column = Column('deleted_at',
                               DateTime,
                               nullable=True,
                               default=None)
    deleted_at_column.create(domains_table, populate_default=True)

    # Drop the old single column unique
    # NOTE(kiall): It appears this does nothing. Migration 17 has been added.
    #              leaving this here for reference.
    domains_table.c.name.alter(unique=False)

    # Add a new multi-column unique index
    constraint = UniqueConstraint('name',
                                  'deleted',
                                  name='unique_domain_name',
                                  table=domains_table)
    constraint.create()
Example #5
0
def downgrade(migrate_engine):
    """Re-create the single-column unique constraint on domains.name."""
    # NOTE(review): relies on a module-level ``meta`` defined outside this view.
    meta.bind = migrate_engine

    domains_table = Table('domains', meta, autoload=True)

    constraint = UniqueConstraint('name', name='name', table=domains_table)
    constraint.create()
def downgrade(migrate_engine):
    """Drop the unique constraint and the 'slug' column from submissions."""
    # Operations to reverse the above upgrade go here.
    meta = MetaData(bind=migrate_engine)
    submissions = Table('submissions', meta, autoload=True)
    # Drop the constraint before the column it covers.
    slugu = UniqueConstraint(submissions.c.slug)
    slugu.drop()
    submissions.c.slug.drop()
def upgrade(migrate_engine):
    """Add event.message_id (unique + indexed) and back-fill it from traits."""
    meta = sqlalchemy.MetaData(bind=migrate_engine)

    event = sqlalchemy.Table('event', meta, autoload=True)
    message_id = sqlalchemy.Column('message_id', sqlalchemy.String(50))
    event.create_column(message_id)

    cons = UniqueConstraint('message_id', table=event)
    cons.create()

    # NOTE(review): indexes via the ORM attribute; assumes models.Event maps
    # the same 'message_id' column -- confirm it stays in sync with the table.
    index = sqlalchemy.Index('idx_event_message_id', models.Event.message_id)
    index.create(bind=migrate_engine)

    # Populate the new column ...
    trait = sqlalchemy.Table('trait', meta, autoload=True)
    unique_name = sqlalchemy.Table('unique_name', meta, autoload=True)
    join = trait.join(unique_name, unique_name.c.id == trait.c.name_id)
    traits = sqlalchemy.select([trait.c.event_id, trait.c.t_string],
                               whereclause=(unique_name.c.key == 'message_id'),
                               from_obj=join)

    # Copy each event's 'message_id' trait value into the new column.
    for event_id, value in traits.execute():
        event.update().\
            where(event.c.id == event_id).\
            values(message_id=value).\
            execute()
def unique_collections_slug(db):
    """Add unique constraint to collection slug.

    Before the constraint is created, any (creator, slug) duplicate rows
    are rewritten to a fresh UUID-based slug so the constraint will hold.
    """
    metadata = MetaData(bind=db.bind)
    collection_table = inspect_table(metadata, "core__collections")
    existing_slugs = {}
    slugs_to_change = []

    for row in db.execute(collection_table.select()):
        # if duplicate slug, generate a unique slug
        if row.creator in existing_slugs and row.slug in \
           existing_slugs[row.creator]:
            slugs_to_change.append(row.id)
        else:
            # Idiomatic replacement for the `not x in`/manual-init branch.
            existing_slugs.setdefault(row.creator, []).append(row.slug)

    for row_id in slugs_to_change:
        # NOTE: unicode() implies this migration targets Python 2.
        new_slug = unicode(uuid.uuid4())
        db.execute(collection_table.update().
                   where(collection_table.c.id == row_id).
                   values(slug=new_slug))
    # sqlite does not like to change the schema when a transaction(update) is
    # not yet completed
    db.commit()

    constraint = UniqueConstraint('creator', 'slug',
                                  name='core__collection_creator_slug_key',
                                  table=collection_table)
    constraint.create()

    db.commit()
Example #9
0
def upgrade(migrate_engine):
    """Add scope and resource_id columns to tsigkeys (with a new enum)."""
    meta.bind = migrate_engine

    # Load the TSIG Keys tables
    tsigkeys_table = Table('tsigkeys', meta, autoload=True)

    scopes = Enum(name='tsig_scopes', metadata=meta, *TSIG_SCOPES)
    scopes.create()

    # Create the scope and resource columns
    scope_col = Column('scope', scopes, nullable=False, server_default='POOL')
    scope_col.create(tsigkeys_table)

    # Start with nullable=True and populate_default=True, then convert
    # to nullable=False once all rows have been populated with a resource_id
    resource_id_col = Column('resource_id', UUID, default=default_pool_id,
                             nullable=True)
    resource_id_col.create(tsigkeys_table, populate_default=True)

    # Now that we've populated the default pool id in existing rows, MySQL
    # will let us convert this over to nullable=False
    tsigkeys_table.c.resource_id.alter(nullable=False)

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('name', name='unique_tsigkey_name',
                                      table=tsigkeys_table)
        constraint.create()
Example #10
0
def fix_CollectionItem_v0_constraint(db_conn):
    """Add the forgotten Constraint on CollectionItem"""

    global collectionitem_unique_constraint_done
    if collectionitem_unique_constraint_done:
        # Reset it. Maybe the whole thing gets run again
        # For a different db?
        collectionitem_unique_constraint_done = False
        return

    metadata = MetaData(bind=db_conn.bind)

    CollectionItem_table = inspect_table(metadata, 'core__collection_items')

    constraint = UniqueConstraint('collection', 'media_entry',
        name='core__collection_items_collection_media_entry_key',
        table=CollectionItem_table)

    try:
        constraint.create()
    except ProgrammingError:
        # User probably has an install that was run since the
        # collection tables were added, so we don't need to run this migration.
        pass

    db_conn.commit()
Example #11
0
def upgrade(migrate_engine):
    """Add a unique constraint on user.name."""
    metadata = MetaData(bind=migrate_engine)
    user_table = Table('user', metadata, autoload=True)
    UniqueConstraint('name', table=user_table).create()
def downgrade(migrate_engine):
    """Revert zone-type support.

    Deletes SECONDARY zones (not representable after downgrade), drops the
    type/transferred_at columns, the domain_attributes table and both enums,
    then re-adds the unique index SQLite loses when columns are dropped.
    """
    meta.bind = migrate_engine

    keys = Enum(name='key', metadata=meta, *ZONE_ATTRIBUTE_KEYS)
    types = Enum(name='types', metadata=meta, *ZONE_TYPES)

    domains_attributes_table = Table('domain_attributes', meta, autoload=True)
    domains_table = Table('domains', meta, autoload=True)

    domains = select(columns=[domains_table.c.id, domains_table.c.type])\
        .where(domains_table.c.type == 'SECONDARY')\
        .execute().fetchall()

    for dom in domains:
        # BUG FIX: columns must be addressed through the ``.c`` collection;
        # the original ``domains_table.id`` raises AttributeError on a Table.
        delete = domains_table.delete()\
            .where(domains_table.c.id == dom.id)
        delete.execute()

    domains_table.c.type.drop()
    domains_table.c.transferred_at.drop()

    domains_attributes_table.drop()
    keys.drop()
    types.drop()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name', table=domains_table)

        # Add missing unique index
        constraint.create()
def upgrade(migrate_engine):
    """Make user.name unique."""
    md = MetaData(bind=migrate_engine)
    users = Table('user', md, autoload=True)
    constraint = UniqueConstraint('name', table=users)
    constraint.create()
Example #14
0
def unique_collections_slug(db):
    """Add unique constraint to collection slug.

    Before the constraint is created, any (creator, slug) duplicate rows
    are rewritten to a fresh UUID-based slug so the constraint will hold.
    """
    metadata = MetaData(bind=db.bind)
    collection_table = inspect_table(metadata, "core__collections")
    existing_slugs = {}
    slugs_to_change = []

    for row in db.execute(collection_table.select()):
        # if duplicate slug, generate a unique slug
        if row.creator in existing_slugs and row.slug in \
           existing_slugs[row.creator]:
            slugs_to_change.append(row.id)
        else:
            # Idiomatic replacement for the `not x in`/manual-init branch.
            existing_slugs.setdefault(row.creator, []).append(row.slug)

    for row_id in slugs_to_change:
        # NOTE: unicode() implies this migration targets Python 2.
        new_slug = unicode(uuid.uuid4())
        db.execute(collection_table.update().where(
            collection_table.c.id == row_id).values(slug=new_slug))
    # sqlite does not like to change the schema when a transaction(update) is
    # not yet completed
    db.commit()

    constraint = UniqueConstraint('creator',
                                  'slug',
                                  name='core__collection_creator_slug_key',
                                  table=collection_table)
    constraint.create()

    db.commit()
Example #15
0
def upgrade(migrate_engine):
    """Add scope and resource_id columns to tsigkeys."""
    meta.bind = migrate_engine

    # Load the TSIG Keys tables
    tsigkeys_table = Table('tsigkeys', meta, autoload=True)

    # Create the scope and resource columns
    scope_col = Column('scope', Enum(name='tsig_scopes', *TSIG_SCOPES),
                       nullable=False, server_default='POOL')
    scope_col.create(tsigkeys_table)

    # Start with nullable=True and populate_default=True, then convert
    # to nullable=False once all rows have been populated with a resource_id
    resource_id_col = Column('resource_id', UUID, default=default_pool_id,
                             nullable=True)
    resource_id_col.create(tsigkeys_table, populate_default=True)

    # Now that we've populated the default pool id in existing rows, MySQL
    # will let us convert this over to nullable=False
    tsigkeys_table.c.resource_id.alter(nullable=False)

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('name', name='unique_tsigkey_name',
                                      table=tsigkeys_table)
        constraint.create()
Example #16
0
def fix_CollectionItem_v0_constraint(db_conn):
    """Add the forgotten Constraint on CollectionItem"""

    global collectionitem_unique_constraint_done
    if collectionitem_unique_constraint_done:
        # Reset it. Maybe the whole thing gets run again
        # For a different db?
        collectionitem_unique_constraint_done = False
        return

    metadata = MetaData(bind=db_conn.bind)

    CollectionItem_table = inspect_table(metadata, 'core__collection_items')

    constraint = UniqueConstraint(
        'collection',
        'media_entry',
        name='core__collection_items_collection_media_entry_key',
        table=CollectionItem_table)

    try:
        constraint.create()
    except ProgrammingError:
        # User probably has an install that was run since the
        # collection tables were added, so we don't need to run this migration.
        pass

    db_conn.commit()
Example #17
0
def upgrade(migrate_engine):
    """Add a pool_id column to domains, back-filled with the default pool."""
    meta.bind = migrate_engine

    domains_table = Table('domains', meta, autoload=True)

    # Get the default pool_id from the config file
    default_pool_id = cfg.CONF['service:central'].default_pool_id

    # Create the pool_id column
    pool_id_column = Column('pool_id',
                            UUID(),
                            default=default_pool_id,
                            nullable=True)
    pool_id_column.create(domains_table, populate_default=True)

    # Alter the table to drop default value after populating it
    domains_table.c.pool_id.alter(default=None)

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('name', 'deleted',
                                      name='unique_domain_name',
                                      table=domains_table)
        constraint.create()
Example #18
0
def upgrade(migrate_engine):
    """Drop the unique constraint on domains.name.

    SQLite cannot drop constraints, so there the column is rebuilt:
    copy name into a temp column, drop the original, rename the copy.
    """
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name

    domains_table = Table('domains', meta, autoload=True)

    if not dialect.startswith('sqlite'):
        constraint = UniqueConstraint('name', name='name', table=domains_table)
        constraint.drop()
    else:
        # SQLite can't drop a constraint. Yay. This will be fun..

        # Create a new name column without the unique index
        name_tmp_column = Column('name_tmp', String(255))
        name_tmp_column.create(domains_table)

        # Copy the data over.
        query = update(domains_table).values(name_tmp=domains_table.c.name)
        migrate_engine.execute(query)

        # Delete the name column
        domains_table.c.name.drop()

        # Rename the name_tmp column to name
        domains_table.c.name_tmp.alter(name='name')
def upgrade(migrate_engine):
    """Enforce one notification row per (event_id, user_id) pair."""
    metadata.bind = migrate_engine
    notification_table = Table('notification', metadata, autoload=True)
    constraint = UniqueConstraint('event_id', 'user_id',
                                  table=notification_table)
    constraint.create()
Example #20
0
def downgrade(migrate_engine):
    """Remove the soft-delete columns and (name, deleted) unique index."""
    meta.bind = migrate_engine

    domains_table = Table('domains', meta, autoload=True)

    # Drop the multi-column unique index
    constraint = UniqueConstraint('name',
                                  'deleted',
                                  name='unique_domain_name',
                                  table=domains_table)
    constraint.drop()

    # Revert to single column unique
    # NOTE(kiall): It appears this does nothing. Migration 17 has been added.
    #              leaving this here for reference.
    domains_table.c.name.alter(unique=True)

    # Drop the deleted columns
    deleted_column = Column('deleted', CHAR(32), nullable=True, default=None)
    deleted_column.drop(domains_table)

    deleted_at_column = Column('deleted_at',
                               DateTime,
                               nullable=True,
                               default=None)
    deleted_at_column.drop(domains_table)
def upgrade(migrate_engine):
    """Make (blinding_set_id, name, duplicate_number) unique on blinding_type."""
    meta.bind = migrate_engine
    blinding_type = Table("blinding_type", meta, autoload=True)
    constraint = UniqueConstraint(
        blinding_type.c.blinding_set_id,
        blinding_type.c.name,
        blinding_type.c.duplicate_number,
    )
    constraint.create()
Example #22
0
def downgrade(migrate_engine):
    """Drop event.message_id together with its unique constraint and index."""
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    event = sqlalchemy.Table('event', meta, autoload=True)
    message_id = sqlalchemy.Column('message_id', sqlalchemy.String(50))
    # Drop constraint and index before removing the column itself.
    cons = UniqueConstraint('message_id', table=event)
    cons.drop()
    index = sqlalchemy.Index('idx_event_message_id', event.c.message_id)
    index.drop(bind=migrate_engine)
    event.drop_column(message_id)
def downgrade(migrate_engine):
    """Drop event.message_id together with its unique constraint and index."""
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    event = sqlalchemy.Table('event', meta, autoload=True)
    message_id = sqlalchemy.Column('message_id', sqlalchemy.String(50))
    cons = UniqueConstraint('message_id', table=event)
    cons.drop()
    # Consistency fix: reference the reflected column (as the sibling
    # downgrade does) instead of models.Event.message_id, so the migration
    # does not depend on the ORM model staying in sync with the schema.
    index = sqlalchemy.Index('idx_event_message_id', event.c.message_id)
    index.drop(bind=migrate_engine)
    event.drop_column(message_id)
def upgrade(migrate_engine):
    """Replace the unique constraint on nhs_number with a plain index."""
    meta.bind = migrate_engine

    t = Table("demographics", meta, autoload=True)

    cons = UniqueConstraint(t.c.nhs_number, name='ix_demographics_nhs_number')
    cons.drop()

    # Recreate the same name as a non-unique index.
    idx = Index('ix_demographics_nhs_number', t.c.nhs_number)
    idx.create(migrate_engine)
Example #25
0
def upgrade(migrate_engine):
    """Add a unique constraint on user.name, muting reflection warnings."""
    # Table reflection on older schemas can emit SAWarnings; ignore them.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=sa_exc.SAWarning)
        metadata = MetaData(bind=migrate_engine)
        user_table = Table('user', metadata, autoload=True)
        UniqueConstraint('name', table=user_table).create()
def upgrade(migrate_engine):
    """Make user.name unique while suppressing reflection warnings."""
    with warnings.catch_warnings():
        # Reflection may raise SAWarnings we deliberately ignore here.
        warnings.simplefilter("ignore", category=sa_exc.SAWarning)
        md = MetaData(bind=migrate_engine)
        users = Table('user', md, autoload=True)
        constraint = UniqueConstraint('name', table=users)
        constraint.create()
def upgrade(migrate_engine):
    """Make (pool_id, key, value) unique on pool_attributes."""
    meta.bind = migrate_engine
    table = Table('pool_attributes', meta, autoload=True)
    UniqueConstraint('pool_id', 'key', 'value',
                     name='unique_pool_attribute',
                     table=table).create()
Example #28
0
def downgrade(migrate_engine):
    """Drop the 'unique_recordset' constraint on records (SQLite only)."""
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name

    if dialect.startswith('sqlite'):
        records_table = Table('records', meta, autoload=True)

        # Drop the unique index
        constraint = UniqueConstraint('hash',
                                      name='unique_recordset',
                                      table=records_table)
        constraint.drop()
def downgrade(migrate_engine):
    """Remove SOA support: delete SOA recordsets, shrink the type enum,
    and delete managed (non-delegated) NS records at each zone apex.
    """
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    zones_table = Table('domains', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    # Record types allowed once 'SOA' is removed from the enum.
    RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS',
                    'PTR', 'SSHFP']

    recordsets_table = Table('recordsets', meta, autoload=True)

    # Delete all SOA records
    recordsets_table.delete().where(recordsets_table.c.type == 'SOA').execute()

    # Remove SOA from the ENUM
    recordsets_table.c.type.alter(type=Enum(name='recordset_types',
                                            *RECORD_TYPES))

    # Remove non-delegated NS records
    # Get all the zones
    zones = select(
        columns=[
            zones_table.c.id,
            zones_table.c.created_at,
            zones_table.c.tenant_id,
            zones_table.c.name,
            zones_table.c.email,
            zones_table.c.serial,
            zones_table.c.refresh,
            zones_table.c.retry,
            zones_table.c.expire,
            zones_table.c.minimum
        ]
    ).execute().fetchall()

    for zone in zones:
        # for each zone, get all non-delegated NS recordsets
        results = recordsets_table.select().\
            where(recordsets_table.c.type == 'NS').\
            where(recordsets_table.c.name == zone.name).execute()
        for r in results:
            records_table.delete().\
                where(records_table.c.recordset_id == r.id).\
                where(records_table.c.managed == 1).execute()
        # NOTE: The value 1 is used instead of True because flake8 complains

    # Re-add the constraint for sqlite
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint('domain_id', 'name', 'type',
                                      name='unique_recordset',
                                      table=recordsets_table)

        constraint.create()
Example #30
0
def pw_hash_nullable(db):
    """Make pw_hash column nullable"""
    metadata = MetaData(bind=db.bind)
    user_table = inspect_table(metadata, "core__users")

    user_table.c.pw_hash.alter(nullable=True)

    # sqlite+sqlalchemy seems to drop this constraint during the
    # migration, so we add it back here for now a bit manually.
    if db.bind.url.drivername == 'sqlite':
        constraint = UniqueConstraint('username', table=user_table)
        constraint.create()

    db.commit()
def downgrade(migrate_engine):
    """Drop the reverse_name columns and their indexes from domains and
    recordsets, then restore the unique constraints SQLite loses on drop.
    """
    meta.bind = migrate_engine

    # Domains Table
    domains_table = Table('domains', meta, autoload=True)

    rev_ind = Index('reverse_name_deleted', domains_table.c.reverse_name,
                    domains_table.c.deleted)
    rev_ind.drop(migrate_engine)

    # Recordsets Table
    rsets_table = Table('recordsets', meta, autoload=True)

    rev_ind = Index('reverse_name_dom_id', rsets_table.c.reverse_name,
                    rsets_table.c.domain_id)
    rev_ind.drop(migrate_engine)

    domains_table.c.reverse_name.drop()
    rsets_table.c.reverse_name.drop()

    # Recreate constraints for SQLite
    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        domains_constraint = UniqueConstraint('name',
                                              'deleted',
                                              name='unique_domain_name',
                                              table=domains_table)
        recordsets_constraint = UniqueConstraint('domain_id',
                                                 'name',
                                                 'type',
                                                 name='unique_recordset',
                                                 table=rsets_table)
        domains_constraint.create()
        recordsets_constraint.create()
Example #32
0
def pw_hash_nullable(db):
    """Make pw_hash column nullable"""
    metadata = MetaData(bind=db.bind)
    user_table = inspect_table(metadata, "core__users")

    user_table.c.pw_hash.alter(nullable=True)

    # sqlite+sqlalchemy seems to drop this constraint during the
    # migration, so we add it back here for now a bit manually.
    if db.bind.url.drivername == 'sqlite':
        constraint = UniqueConstraint('username', table=user_table)
        constraint.create()

    db.commit()
def upgrade(migrate_engine):
    """Make (service_name, hostname) unique on service_statuses.

    Exits the process with status 1 (after logging EXPLANATION) when
    existing duplicate rows prevent the constraint from being created.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    service_statuses_table = Table('service_statuses', meta, autoload=True)

    # Add UniqueConstraint based on service_name and hostname.
    constraint = UniqueConstraint('service_name', 'hostname',
                                  table=service_statuses_table,
                                  name="unique_service_status")
    try:
        constraint.create()
    except exc.IntegrityError as e:
        LOG.error(EXPLANATION, e)
        # Use sys.exit so we don't blow up with a huge trace
        sys.exit(1)
def downgrade(migrate_engine):
    """Remove SOA support: delete SOA recordsets, shrink the type enum,
    and delete managed (non-delegated) NS records at each zone apex.
    """
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    zones_table = Table('domains', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    # Record types allowed once 'SOA' is removed from the enum.
    RECORD_TYPES = [
        'A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS', 'PTR', 'SSHFP'
    ]

    recordsets_table = Table('recordsets', meta, autoload=True)

    # Delete all SOA records
    recordsets_table.delete().where(recordsets_table.c.type == 'SOA').execute()

    # Remove SOA from the ENUM
    recordsets_table.c.type.alter(
        type=Enum(name='recordset_types', *RECORD_TYPES))

    # Remove non-delegated NS records
    # Get all the zones
    zones = select(columns=[
        zones_table.c.id, zones_table.c.created_at, zones_table.c.tenant_id,
        zones_table.c.name, zones_table.c.email, zones_table.c.serial,
        zones_table.c.refresh, zones_table.c.retry, zones_table.c.expire,
        zones_table.c.minimum
    ]).execute().fetchall()

    for zone in zones:
        # for each zone, get all non-delegated NS recordsets
        results = recordsets_table.select().\
            where(recordsets_table.c.type == 'NS').\
            where(recordsets_table.c.name == zone.name).execute()
        for r in results:
            records_table.delete().\
                where(records_table.c.recordset_id == r.id).\
                where(records_table.c.managed == 1).execute()
        # NOTE: The value 1 is used instead of True because flake8 complains

    # Re-add the constraint for sqlite
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint('domain_id',
                                      'name',
                                      'type',
                                      name='unique_recordset',
                                      table=recordsets_table)

        constraint.create()
Example #35
0
def downgrade(migrate_engine):
    """Drop the scope and resource_id columns from tsigkeys."""
    meta.bind = migrate_engine

    # Load the TSIG Keys tables
    tsigkeys_table = Table('tsigkeys', meta, autoload=True)

    # Drop the scope and resource columns
    tsigkeys_table.c.scope.drop()
    tsigkeys_table.c.resource_id.drop()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('name', name='unique_tsigkey_name',
                                      table=tsigkeys_table)
        constraint.create()
def upgrade(migrate_engine):
    """Make (service_name, hostname) unique on service_statuses.

    Exits the process with status 1 (after logging EXPLANATION) when
    existing duplicate rows prevent the constraint from being created.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    service_statuses_table = Table('service_statuses', meta, autoload=True)

    # Add UniqueConstraint based on service_name and hostname.
    constraint = UniqueConstraint('service_name',
                                  'hostname',
                                  table=service_statuses_table,
                                  name="unique_service_status")
    try:
        constraint.create()
    except exc.IntegrityError as e:
        LOG.error(EXPLANATION, e)
        # Use sys.exit so we don't blow up with a huge trace
        sys.exit(1)
def downgrade(migrate_engine):
    """Revert the user-auth columns: rename date_created back and drop the
    email/password/login-tracking columns added by the matching upgrade.
    """
    meta.bind = migrate_engine
    t = Table("user", meta, autoload=True)

    t.c.date_created.alter(name='created_datetime')

    # The unique constraint must go before the email column it covers.
    email_cons = UniqueConstraint(t.c.email, name='udx__user__email')
    email_cons.drop()

    t.c.email.drop()
    t.c.password.drop()
    t.c.confirmed_at.drop()
    t.c.last_login_at.drop()
    t.c.current_login_at.drop()
    t.c.last_login_ip.drop()
    t.c.current_login_ip.drop()
    t.c.login_count.drop()
Example #38
0
    def test_drop_unique_constraint(self):
        """Dropping unique_a_b leaves only unique_b_c on the reflected table."""
        table = self.reflected_table
        UniqueConstraint(table.c.a, table.c.b, name='unique_a_b').drop()

        # Compare the constraints reflected from the DB with the expectation.
        existing = uniques(*self.helper._get_unique_constraints(table))
        should_be = uniques(
            sa.UniqueConstraint(table.c.b, table.c.c, name='unique_b_c'), )
        self.assertEqual(should_be, existing)
Example #39
0
def upgrade(migrate_engine):
    """Add datastore_versions.version (seeded from name) and widen the
    ds_versions unique constraint to include it.

    The datastore_id foreign key is dropped around the constraint swap and
    re-created afterwards, since some backends will not alter a constraint
    over FK-referenced columns.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    ds_table = Table('datastores', meta, autoload=True)
    ds_version_table = Table('datastore_versions', meta, autoload=True)
    ds_version_table.create_column(
        Column('version', String(255), nullable=True))

    ds_versions = select(columns=[text("id"), text("name")],
                         from_obj=ds_version_table).execute()

    # Use 'name' value as init 'version' value
    for version in ds_versions:
        update(table=ds_version_table,
               whereclause=text("id='%s'" % version.id),
               values=dict(version=version.name)).execute()

    # Change unique constraint, need to drop the foreign key first and add back
    # later
    constraint_names = db_utils.get_foreign_key_constraint_names(
        engine=migrate_engine,
        table='datastore_versions',
        columns=['datastore_id'],
        ref_table='datastores',
        ref_columns=['id'])
    db_utils.drop_foreign_key_constraints(
        constraint_names=constraint_names,
        columns=[ds_version_table.c.datastore_id],
        ref_columns=[ds_table.c.id])

    UniqueConstraint('datastore_id',
                     'name',
                     name='ds_versions',
                     table=ds_version_table).drop()
    UniqueConstraint('datastore_id',
                     'name',
                     'version',
                     name='ds_versions',
                     table=ds_version_table).create()

    db_utils.create_foreign_key_constraints(
        constraint_names=constraint_names,
        columns=[ds_version_table.c.datastore_id],
        ref_columns=[ds_table.c.id])
def upgrade(migrate_engine):
    """Add zone-type support: the domain_attributes table plus type and
    transferred_at columns on domains.
    """
    meta.bind = migrate_engine

    keys = Enum(name='key', *ZONE_ATTRIBUTE_KEYS)

    domain_attributes_table = Table(
        'domain_attributes', meta,
        Column('id', UUID(), default=utils.generate_uuid, primary_key=True),
        Column('version', Integer(), default=1, nullable=False),
        Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
        Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),

        Column('key', keys),
        Column('value', String(255), nullable=False),
        Column('domain_id', UUID(), nullable=False),

        UniqueConstraint('key', 'value', 'domain_id',
                         name='unique_attributes'),
        ForeignKeyConstraint(['domain_id'], ['domains.id'],
                             ondelete='CASCADE'),

        mysql_engine='INNODB',
        mysql_charset='utf8'
    )

    domains_table = Table('domains', meta, autoload=True)
    types = Enum(name='types', metadata=meta, *ZONE_TYPES)
    types.create()

    # Add type and transferred_at to domains
    type_ = Column('type', types, default='PRIMARY', server_default='PRIMARY')
    transferred_at = Column('transferred_at', DateTime, default=None)

    type_.create(domains_table, populate_default=True)
    transferred_at.create(domains_table, populate_default=True)

    domain_attributes_table.create()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name', table=domains_table)

        # Add missing unique index
        constraint.create()
Example #41
0
def upgrade(migrate_engine):
    """Create the field table with FKs to field_type/study and a
    (study_id, order) unique constraint.
    """
    # NOTE(review): ``field`` is a module-level Table defined outside this
    # view -- confirm it is declared before this runs.
    meta.bind = migrate_engine
    field.create()

    field_type = Table("field_type", meta, autoload=True)
    study = Table("study", meta, autoload=True)

    fk_field_type = ForeignKeyConstraint([field.c.field_type_id],
                                         [field_type.c.id])
    fk_field_type.create()

    fk_study = ForeignKeyConstraint([field.c.study_id], [study.c.id])
    fk_study.create()

    uk_study_id_order = UniqueConstraint(field.c.study_id,
                                         field.c.order,
                                         table=field)
    uk_study_id_order.create()
Example #42
0
def downgrade(migrate_engine):
    """Drop the scope/resource_id columns and the tsig_scopes enum."""
    meta.bind = migrate_engine

    # Load the TSIG Keys tables
    tsigkeys_table = Table('tsigkeys', meta, autoload=True)
    scopes = Enum(name='tsig_scopes', metadata=meta, *TSIG_SCOPES)

    # Drop the scope and resource columns, then the enum type they used.
    tsigkeys_table.c.scope.drop()
    tsigkeys_table.c.resource_id.drop()
    scopes.drop()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('name', name='unique_tsigkey_name',
                                      table=tsigkeys_table)
        constraint.create()
def upgrade(migrate_engine):
    """Ensure pool_ns_records carries its (pool_id, hostname) unique constraint."""
    meta.bind = migrate_engine

    pool_ns_records_table = Table('pool_ns_records', meta, autoload=True)

    # The constraint may already be present (it was backported to the L
    # release), so only create it when the reflected table lacks it.
    existing_names = [c.name for c in pool_ns_records_table.constraints]

    if CONSTRAINT_NAME not in existing_names:
        # Defined here: if it already existed it would show in the list above.
        unique = UniqueConstraint('pool_id', 'hostname',
                                  name=CONSTRAINT_NAME,
                                  table=pool_ns_records_table)
        try:
            unique.create()
        except exc.IntegrityError as e:
            LOG.error(explanation, e)
            # sys.exit keeps the operator from being buried in a huge trace.
            sys.exit(1)
    def test_failure(self):
        """Duplicate service_status rows must make the check report FAILURE."""
        # Remove the unique constraint so duplicate rows can be inserted.
        UniqueConstraint('service_name', 'hostname',
                         table=self.service_statuses_table,
                         name="unique_service_status").drop()

        record = {'id': '1',
                  'service_name': 'worker',
                  'hostname': 'localhost',
                  'status': 'UP',
                  'stats': '',
                  'capabilities': '',
                  }
        # Insert the same service twice, differing only in id.
        self.service_statuses_table.insert().execute(record)
        record['id'] = '2'
        self.service_statuses_table.insert().execute(record)

        checks = status.Checks()
        self.assertEqual(upgradecheck.Code.FAILURE,
                         checks._duplicate_service_status().code)
def upgrade(migrate_engine):
    """Add soft-delete columns to domains and widen the name uniqueness."""
    meta.bind = migrate_engine

    domains_table = Table("domains", meta, autoload=True)

    # Soft-delete marker column; live rows carry the "0" default.
    Column("deleted", CHAR(32), nullable=False, default="0",
           server_default="0").create(domains_table, populate_default=True)

    Column("deleted_at", DateTime, nullable=True,
           default=None).create(domains_table, populate_default=True)

    # Drop the old single column unique
    # NOTE(kiall): It appears this does nothing. Migration 17 has been added.
    #              leaving this here for reference.
    domains_table.c.name.alter(unique=False)

    # A name only needs to be unique among rows with the same deleted marker.
    UniqueConstraint("name", "deleted", name="unique_domain_name",
                     table=domains_table).create()
def downgrade(migrate_engine):
    """Reverse the soft-delete migration on the domains table."""
    meta.bind = migrate_engine

    domains_table = Table("domains", meta, autoload=True)

    # Remove the multi-column unique index first.
    UniqueConstraint("name", "deleted", name="unique_domain_name",
                     table=domains_table).drop()

    # Revert to single column unique
    # NOTE(kiall): It appears this does nothing. Migration 17 has been added.
    #              leaving this here for reference.
    domains_table.c.name.alter(unique=True)

    # Finally drop both soft-delete columns.
    Column("deleted", CHAR(32), nullable=True,
           default=None).drop(domains_table)
    Column("deleted_at", DateTime, nullable=True,
           default=None).drop(domains_table)
def downgrade(migrate_engine):
    """Drop the reverse_name columns and their indexes from domains/recordsets."""
    meta.bind = migrate_engine

    domains_table = Table("domains", meta, autoload=True)
    rsets_table = Table("recordsets", meta, autoload=True)

    # Drop each index before the column it covers.
    Index("reverse_name_deleted",
          domains_table.c.reverse_name,
          domains_table.c.deleted).drop(migrate_engine)
    Index("reverse_name_dom_id",
          rsets_table.c.reverse_name,
          rsets_table.c.domain_id).drop(migrate_engine)

    domains_table.c.reverse_name.drop()
    rsets_table.c.reverse_name.drop()

    # Re-create the unique constraints on SQLite after the column drops.
    if migrate_engine.url.get_dialect().name.startswith("sqlite"):
        UniqueConstraint("name", "deleted",
                         name="unique_domain_name",
                         table=domains_table).create()
        UniqueConstraint("domain_id", "name", "type",
                         name="unique_recordset",
                         table=rsets_table).create()
def downgrade(migrate_engine):
    """Revert domains/records status columns and drop the action columns."""
    meta.bind = migrate_engine

    RESOURCE_STATUSES = ['ACTIVE', 'PENDING', 'DELETED']
    dialect = migrate_engine.url.get_dialect().name

    # Get associated database tables
    domains_table = Table('domains', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    # Domains: shrink status back to the old enum, drop 'action'.
    domains_table.c.status.alter(
        type=Enum(name='resource_statuses', *RESOURCE_STATUSES),
        default='ACTIVE', server_default='ACTIVE')
    domains_table.c.action.drop()

    if dialect.startswith('sqlite'):
        # Re-add the unique constraint for sqlite after the column drop.
        UniqueConstraint('name', 'deleted', name='unique_domain_name',
                         table=domains_table).create()

    # Records: same treatment, plus dropping the 'serial' column.
    records_table.c.status.alter(
        type=Enum(name='resource_statuses', *RESOURCE_STATUSES),
        default='ACTIVE', server_default='ACTIVE')
    records_table.c.action.drop()
    records_table.c.serial.drop()

    if dialect.startswith('sqlite'):
        # Re-add the unique constraint for sqlite after the column drops.
        UniqueConstraint('hash', name='unique_record',
                         table=records_table).create()
def downgrade(migrate_engine):
    """Drop the unique_pool_attribute constraint from pool_attributes.

    The pool_id foreign key is dropped first and restored afterwards —
    the original ordering comments indicate the FK must be out of the way
    before the unique constraint can be dropped.
    """
    meta.bind = migrate_engine
    pool_attributes_table = Table('pool_attributes', meta, autoload=True)
    # pools = Table('pools', meta, autoload=True)

    unique = UniqueConstraint('pool_id', 'key', 'value',
                              name='unique_pool_attribute',
                              table=pool_attributes_table)

    fk = ForeignKeyConstraint(columns=['pool_id'],
                              refcolumns=['pools.id'],
                              ondelete='CASCADE',
                              table=pool_attributes_table)

    # FK off, unique constraint off, FK back on — in that exact order.
    fk.drop()
    unique.drop()
    fk.create()
def downgrade(migrate_engine):
    """Re-add records.priority and split the priority back out of data.

    The matching upgrade folded each record's priority into the data field
    as "<priority> <data>"; this walks every recordset's records, splits
    the priority back off, restores the old-style hash and repopulates the
    priority column.
    """
    meta.bind = migrate_engine

    rs_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    recordsets = _get_recordsets(rs_table)

    # Restore the dropped column; rows start out with NULL priority until
    # the loop below fills them in.
    col = Column('priority', Integer, default=None, nullable=True)
    col.create(records_table)

    record_cols = [
        records_table.c.id,
        records_table.c.priority,
        records_table.c.data]

    for rs in recordsets:
        records = select(columns=record_cols)\
            .where(records_table.c.recordset_id == rs[0])\
            .execute().fetchall()

        for record in records:
            # The upgrade stored data as "<priority> <data>", so everything
            # before the first space is the priority.
            priority, _, data = record[2].partition(" ")

            # Old style hashes are <rs_id>:<data>:<priority>
            new_hash = _build_hash(rs[0], data, priority)

            update = records_table.update()\
                .where(records_table.c.id == record[0])\
                .values(priority=int(priority), data=data, hash=new_hash)
            update.execute()

    # SQLite loses the table-level unique on hash during these alters, so
    # re-create it on that dialect only.
    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('hash',
                                      name='unique_recordset',
                                      table=records_table)
        constraint.create()
def upgrade(migrate_engine):
    """Fold records.priority into the data column, then drop priority.

    For every record with a non-NULL priority, data becomes
    "<priority> <data>" and the record's hash is rebuilt without a
    separate priority component; the priority column is removed at the end.
    """
    meta.bind = migrate_engine

    rs_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    recordsets = _get_recordsets(rs_table)

    record_cols = [
        records_table.c.id,
        records_table.c.priority,
        records_table.c.data]

    for rs in recordsets:
        # NOTE: '!= None' (not 'is not None') is deliberate — SQLAlchemy
        # translates it into an SQL "IS NOT NULL" filter.
        query = select(columns=record_cols)\
            .where(records_table.c.recordset_id == rs[0])\
            .where(records_table.c.priority != None)  # noqa
        records = query.execute().fetchall()

        for record in records:
            # Prefix the priority onto the data value.
            new_data = '%s %s' % (int(record[1]), record[2])
            # New style hashes are <rs_id>:<data> since prio is baked into data
            new_hash = _build_hash(rs[0], new_data)

            update = records_table.update()\
                .where(records_table.c.id == record[0])\
                .values(data=new_data, hash=new_hash)
            migrate_engine.execute(update)

    records_table.c.priority.drop()

    # SQLite loses the table-level unique on hash during these alters, so
    # re-create it on that dialect only.
    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('hash',
                                      name='unique_recordset',
                                      table=records_table)
        constraint.create()