def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
                           **col_name_col_instance):
    """Drop a unique constraint (UC) from a table.

    This method works for mysql, postgresql and sqlite. In mysql and
    postgresql we are able to use the "alter table" construction. In sqlite
    there is only one way to drop a UC:
        1) Create a new table with the same columns, indexes and constraints
           (except the one that we want to drop).
        2) Copy data from the old table to the new one.
        3) Drop the old table.
        4) Rename the new table to the name of the old table.

    :param migrate_engine:        sqlalchemy engine
    :param table_name:            name of the table that contains the unique
                                  constraint.
    :param uc_name:               name of the unique constraint that will be
                                  dropped.
    :param columns:               columns that are in the unique constraint.
    :param col_name_col_instance: contains pairs column_name=column_instance.
                                  column_instance is an instance of Column.
                                  These params are required only for columns
                                  that have types unsupported by sqlite, for
                                  example BigInteger.
    """
    if migrate_engine.name in ["mysql", "postgresql"]:
        meta = MetaData()
        meta.bind = migrate_engine
        t = Table(table_name, meta, autoload=True)
        uc = UniqueConstraint(*columns, table=t, name=uc_name)
        uc.drop()
    else:
        _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
                                          **col_name_col_instance)
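# NOTE: A minimal sketch (not taken from the helper above) of the sqlite path
# that _drop_unique_constraint_in_sqlite implements, shown as plain SQL for a
# hypothetical "services" table with a single unique "host" column. The real
# helper also carries over indexes, foreign keys and unsupported column types.
def _sqlite_drop_uc_sketch(migrate_engine):
    with migrate_engine.begin() as conn:
        # 1) Create a new table with the same columns, minus the constraint.
        conn.execute("CREATE TABLE services_new "
                     "(id INTEGER PRIMARY KEY, host VARCHAR(255))")
        # 2) Copy data from the old table to the new one.
        conn.execute("INSERT INTO services_new (id, host) "
                     "SELECT id, host FROM services")
        # 3) Drop the old table.
        conn.execute("DROP TABLE services")
        # 4) Rename the new table to the name of the old table.
        conn.execute("ALTER TABLE services_new RENAME TO services")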
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    aggregates = Table("aggregates", meta, autoload=True)
    aggregate_metadata = Table("aggregate_metadata", meta, autoload=True)
    record_list = list(aggregates.select().execute())
    for rec in record_list:
        row = aggregate_metadata.insert()
        row.execute(
            {
                "created_at": rec["created_at"],
                "updated_at": rec["updated_at"],
                "deleted_at": rec["deleted_at"],
                "deleted": rec["deleted"],
                "key": "operational_state",
                "value": rec["operational_state"],
                "aggregate_id": rec["id"],
            }
        )
    aggregates.drop_column("operational_state")

    aggregate_hosts = Table("aggregate_hosts", meta, autoload=True)
    if dialect.startswith("sqlite"):
        aggregate_hosts.c.host.alter(unique=False)
    elif dialect.startswith("postgres"):
        ucon = UniqueConstraint("host",
                                name="aggregate_hosts_host_key",
                                table=aggregate_hosts)
        ucon.drop()
    else:
        col = aggregate_hosts.c.host
        UniqueConstraint(col, name="host").drop()
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    aggregates = Table('aggregates', meta, autoload=True)
    aggregate_metadata = Table('aggregate_metadata', meta, autoload=True)
    record_list = list(aggregates.select().execute())
    for rec in record_list:
        row = aggregate_metadata.insert()
        row.execute({'created_at': rec['created_at'],
                     'updated_at': rec['updated_at'],
                     'deleted_at': rec['deleted_at'],
                     'deleted': rec['deleted'],
                     'key': 'operational_state',
                     'value': rec['operational_state'],
                     'aggregate_id': rec['id'],
                     })
    aggregates.drop_column('operational_state')

    aggregate_hosts = Table('aggregate_hosts', meta, autoload=True)
    if dialect.startswith('sqlite'):
        aggregate_hosts.c.host.alter(unique=False)
    elif dialect.startswith('postgres'):
        ucon = UniqueConstraint('name',
                                name='aggregate_hosts_host_key',
                                table=aggregate_hosts)
        ucon.drop()
    else:
        col = aggregate_hosts.c.host
        UniqueConstraint(col, name='host').drop()
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    aggregates = Table('aggregates', meta, autoload=True)
    aggregate_metadata = Table('aggregate_metadata', meta, autoload=True)
    record_list = list(aggregates.select().execute())
    for rec in record_list:
        row = aggregate_metadata.insert()
        row.execute({'created_at': rec['created_at'],
                     'updated_at': rec['updated_at'],
                     'deleted_at': rec['deleted_at'],
                     'deleted': rec['deleted'],
                     'key': 'operational_state',
                     'value': rec['operational_state'],
                     'aggregate_id': rec['id'],
                     })
    aggregates.drop_column('operational_state')

    aggregate_hosts = Table('aggregate_hosts', meta, autoload=True)
    if dialect.startswith('sqlite'):
        aggregate_hosts.drop_column('host')
        aggregate_hosts.create_column(Column('host', String(255)))
    elif dialect.startswith('postgres'):
        ucon = UniqueConstraint('host',
                                name='aggregate_hosts_host_key',
                                table=aggregate_hosts)
        ucon.drop()
    else:
        col = aggregate_hosts.c.host
        UniqueConstraint(col, name='host').drop()
def downgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    table = Table(TABLE_NAME, meta, autoload=True)

    utils.drop_unique_constraint(migrate_engine, TABLE_NAME, NEW_NAME,
                                 *COLUMNS)

    uc_old = UniqueConstraint(*COLUMNS, table=table, name=OLD_NAME)
    uc_old.create()
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
                           **col_name_col_instance):
    """Drop a unique constraint (UC) from a table.

    This method works for mysql, postgresql and sqlite. In mysql and
    postgresql we are able to use the "alter table" construction. In sqlite
    there is only one way to drop a UC:
        1) Create a new table with the same columns, indexes and constraints
           (except the one that we want to drop).
        2) Copy data from the old table to the new one.
        3) Drop the old table.
        4) Rename the new table to the name of the old table.

    :param migrate_engine:        sqlalchemy engine
    :param table_name:            name of the table that contains the unique
                                  constraint.
    :param uc_name:               name of the unique constraint that will be
                                  dropped.
    :param columns:               columns that are in the unique constraint.
    :param col_name_col_instance: contains pairs column_name=column_instance.
                                  column_instance is an instance of Column.
                                  These params are required only for columns
                                  that have types unsupported by sqlite, for
                                  example BigInteger.
    """
    if migrate_engine.name == "sqlite":
        _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
                                          **col_name_col_instance)
    else:
        meta = MetaData()
        meta.bind = migrate_engine
        t = Table(table_name, meta, autoload=True)
        uc = UniqueConstraint(*columns, table=t, name=uc_name)
        uc.drop()
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
                           **col_name_col_instance):
    """Drop unique constraint from table.

    This method drops the UC from a table and works for mysql, postgresql and
    sqlite. In mysql and postgresql we are able to use the "alter table"
    construction.

    SQLAlchemy doesn't support some sqlite column types and replaces their
    type with NullType in metadata. We process these columns and replace
    NullType with the correct column type.

    :param migrate_engine:        sqlalchemy engine
    :param table_name:            name of the table that contains the unique
                                  constraint.
    :param uc_name:               name of the unique constraint that will be
                                  dropped.
    :param columns:               columns that are in the unique constraint.
    :param col_name_col_instance: contains pairs column_name=column_instance.
                                  column_instance is an instance of Column.
                                  These params are required only for columns
                                  that have types unsupported by sqlite, for
                                  example BigInteger.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    t = Table(table_name, meta, autoload=True)

    if migrate_engine.name == "sqlite":
        override_cols = [
            _get_not_supported_column(col_name_col_instance, col.name)
            for col in t.columns
            if isinstance(col.type, NullType)
        ]
        for col in override_cols:
            t.columns.replace(col)

    uc = UniqueConstraint(*columns, table=t, name=uc_name)
    uc.drop()
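# NOTE: A usage sketch (assumed, not from the original migrations): when the
# sqlite reflection of a column comes back as NullType -- e.g. a BigInteger
# "offset" column -- the caller supplies the real Column instance via
# **col_name_col_instance so the constraint can still be dropped. The table,
# constraint and column names here are illustrative.
from sqlalchemy import BigInteger, Column

drop_unique_constraint(migrate_engine, 'compute_nodes', 'uniq_offset',
                       'offset', offset=Column('offset', BigInteger()))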
def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    t = Table('chassis', meta, autoload=True)

    # NOTE: new name convention for UC
    uc = UniqueConstraint('uuid', table=t, name='uniq_chassis0uuid')
    uc.create()
def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    t = Table(TABLE_NAME, meta, autoload=True)

    utils.drop_old_duplicate_entries_from_table(migrate_engine, TABLE_NAME,
                                                True, *COLUMNS)

    uc = UniqueConstraint(*COLUMNS, table=t, name=UC_NAME)
    uc.create()
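# NOTE: A sketch of why drop_old_duplicate_entries_from_table has to run
# before the constraint is created: pre-existing duplicate rows would make
# the CREATE fail. This is not the helper's actual implementation; it uses an
# illustrative 'key_pairs' table with ('user_id', 'name') as the constrained
# columns and nova-style soft delete where `deleted` stores the row id.
from sqlalchemy import MetaData, Table, func, select

def _soft_delete_old_duplicates_sketch(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    t = Table('key_pairs', meta, autoload=True)
    # Keep only the newest live row per (user_id, name) key.
    keep = (select([func.max(t.c.id)])
            .where(t.c.deleted == 0)
            .group_by(t.c.user_id, t.c.name))
    keep_ids = [row[0] for row in migrate_engine.execute(keep)]
    # Soft-delete everything else so the new unique constraint can be built.
    migrate_engine.execute(t.update()
                           .where(t.c.deleted == 0)
                           .where(~t.c.id.in_(keep_ids))
                           .values(deleted=t.c.id))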
def test_util_drop_unique_constraint_with_not_supported_sqlite_type(self):
    table_name = "__test_tmp_table__"
    uc_name = 'uniq_foo'
    values = [
        {'id': 1, 'a': 3, 'foo': 10},
        {'id': 2, 'a': 2, 'foo': 20},
        {'id': 3, 'a': 1, 'foo': 30}
    ]

    for key, engine in self.engines.items():
        meta = MetaData()
        meta.bind = engine
        test_table = Table(table_name, meta,
                           Column('id', Integer, primary_key=True,
                                  nullable=False),
                           Column('a', Integer),
                           Column('foo', CustomType, default=0),
                           UniqueConstraint('a', name='uniq_a'),
                           UniqueConstraint('foo', name=uc_name))
        test_table.create()
        engine.execute(test_table.insert(), values)

        if key == "sqlite":
            warnings.simplefilter("ignore", SAWarning)
            # NOTE(boris-42): Missing info about column `foo` that has
            #                 unsupported type CustomType.
            self.assertRaises(exception.NovaException,
                              utils.drop_unique_constraint,
                              engine, table_name, uc_name, 'foo')

            # NOTE(boris-42): Wrong type of foo instance. it should be
            #                 instance of sqlalchemy.Column.
            self.assertRaises(exception.NovaException,
                              utils.drop_unique_constraint,
                              engine, table_name, uc_name, 'foo',
                              foo=Integer())

        foo = Column('foo', CustomType, default=0)
        utils.drop_unique_constraint(engine, table_name, uc_name, 'foo',
                                     foo=foo)

        s = test_table.select().order_by(test_table.c.id)
        rows = engine.execute(s).fetchall()

        for i in xrange(0, len(values)):
            v = values[i]
            self.assertEqual((v['id'], v['a'], v['foo']), rows[i])

        # NOTE(boris-42): Update data about Table from DB.
        meta = MetaData()
        meta.bind = engine
        test_table = Table(table_name, meta, autoload=True)
        constraints = filter(lambda c: c.name == uc_name,
                             test_table.constraints)
        self.assertEqual(len(constraints), 0)
        self.assertEqual(len(test_table.constraints), 1)
        test_table.drop()
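# NOTE: A sketch of what the CustomType referenced by the test above could
# look like (an assumption; the real test fixture may differ): a user-defined
# type whose column spec sqlite reflection cannot map back, so autoload
# reports it as NullType and drop_unique_constraint needs the override column.
from sqlalchemy.types import UserDefinedType

class CustomType(UserDefinedType):
    """A type that reflection does not recognize."""

    def get_col_spec(self):
        return "CUSTOM_TYPE"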
def test_utils_drop_unique_constraint(self):
    table_name = "__test_tmp_table__"
    uc_name = 'uniq_foo'
    values = [
        {'id': 1, 'a': 3, 'foo': 10},
        {'id': 2, 'a': 2, 'foo': 20},
        {'id': 3, 'a': 1, 'foo': 30},
    ]

    for engine in self.engines.values():
        meta = MetaData()
        meta.bind = engine
        test_table = Table(
            table_name, meta,
            Column('id', Integer, primary_key=True, nullable=False),
            Column('a', Integer),
            Column('foo', Integer),
            UniqueConstraint('a', name='uniq_a'),
            UniqueConstraint('foo', name=uc_name),
        )
        test_table.create()
        engine.execute(test_table.insert(), values)

        # NOTE(boris-42): This method is generic UC dropper.
        utils.drop_unique_constraint(engine, table_name, uc_name, 'foo')

        s = test_table.select().order_by(test_table.c.id)
        rows = engine.execute(s).fetchall()

        for i in moves.range(len(values)):
            v = values[i]
            self.assertEqual((v['id'], v['a'], v['foo']), rows[i])

        # NOTE(boris-42): Update data about Table from DB.
        meta = MetaData()
        meta.bind = engine
        test_table = Table(table_name, meta, autoload=True)
        constraints = [c for c in test_table.constraints
                       if c.name == uc_name]
        self.assertEqual(len(constraints), 0)
        self.assertEqual(len(test_table.constraints), 1)

        test_table.drop()
def restore_unique_constraint(table):
    # NOTE(Vek): So, sqlite doesn't really support dropping columns,
    #            and so it gets implemented by dropping and recreating
    #            the table...which of course means we completely lose
    #            the unique constraint.  We re-create it here to work
    #            around this issue.
    uc_name = 'uniq_cell_name0deleted'
    columns = ('name', 'deleted')
    uc = UniqueConstraint(*columns, table=table, name=uc_name)
    uc.create()
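# NOTE: An assumed call site (not shown in the original migration) for the
# helper above: after drop_column() on sqlite rebuilds the table (assumed
# here to be 'cells') and silently loses the ('name', 'deleted') constraint,
# the migration re-creates it. The column being dropped is purely
# illustrative.
def downgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    cells = Table('cells', meta, autoload=True)
    cells.drop_column('some_new_column')  # hypothetical column
    if migrate_engine.name == 'sqlite':
        restore_unique_constraint(cells)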
def upgrade(migrate_engine):
    utils.drop_unique_constraint(migrate_engine, TABLE_NAME, OLD_UC_NAME,
                                 OLD_COLUMN)

    meta = MetaData(bind=migrate_engine)
    t = Table(TABLE_NAME, meta, autoload=True)

    if migrate_engine.name == "mysql":
        index = Index(OLD_COLUMN, t.c[OLD_COLUMN], unique=True)
        index.drop()

    uc = UniqueConstraint(*COLUMNS, table=t, name=UC_NAME)
    uc.create()
def downgrade(migrate_engine):
    image_members = _get_image_members_table(migrate_engine)

    if (migrate_engine.name == 'mysql' or
            migrate_engine.name == 'postgresql'):
        _sanitize(migrate_engine, image_members)
        UniqueConstraint('image_id',
                         name=NEW_KEYNAME,
                         table=image_members).drop()
        UniqueConstraint('image_id',
                         'member',
                         name=_get_original_keyname(migrate_engine.name),
                         table=image_members).create()
def downgrade(migrate_engine):
    utils.drop_unique_constraint(migrate_engine, TABLE_NAME, UC_NAME,
                                 *COLUMNS)

    meta = MetaData(bind=migrate_engine)
    t = Table(TABLE_NAME, meta, autoload=True)

    delete_statement = t.delete().where(t.c.deleted != 0)
    migrate_engine.execute(delete_statement)

    uc = UniqueConstraint(OLD_COLUMN, table=t, name=OLD_UC_NAME)
    uc.create()

    if migrate_engine.name == "mysql":
        index = Index(OLD_COLUMN, t.c[OLD_COLUMN], unique=True)
        index.create()
def upgrade(migrate_engine):
    """This database upgrade drops the old unique constraint and creates a
    new unique constraint for the kube_app table.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    kube_app = Table('kube_app', meta, autoload=True)
    UniqueConstraint('name', table=kube_app).drop()
    UniqueConstraint('name', 'app_version', table=kube_app,
                     name='u_app_name_version').create()
def upgrade(migrate_engine):
    """Function enforces non-null value for keypairs name field."""
    meta = MetaData(bind=migrate_engine)
    key_pairs = Table('key_pairs', meta, autoload=True)

    # Note: Since we are altering name field, this constraint on name needs
    # to first be dropped before we can alter name. We then re-create the
    # same constraint. It was first added in 216_havana.py so no need to
    # remove constraint on downgrade.
    UniqueConstraint('user_id', 'name', 'deleted', table=key_pairs,
                     name='uniq_key_pairs0user_id0name0deleted').drop()
    key_pairs.c.name.alter(nullable=False)
    UniqueConstraint('user_id', 'name', 'deleted', table=key_pairs,
                     name='uniq_key_pairs0user_id0name0deleted').create()
def _uc_rename(migrate_engine, upgrade=True):
    UC_DATA.update(UC_SPEC_DB_DATA[migrate_engine.name])

    meta = MetaData(bind=migrate_engine)

    for table in UC_DATA:
        t = Table(table, meta, autoload=True)

        for columns, old_uc_name in UC_DATA[table]:
            new_uc_name = "uniq_{0}0{1}".format(table, "0".join(columns))

            if table in constraint_names and migrate_engine.name == "mysql":
                instances = Table("instances", meta, autoload=True)
                ForeignKeyConstraint(
                    columns=[t.c.instance_uuid],
                    refcolumns=[instances.c.uuid],
                    name=constraint_names[table]
                ).drop(engine=migrate_engine)

            if upgrade:
                old_name, new_name = old_uc_name, new_uc_name
            else:
                old_name, new_name = new_uc_name, old_uc_name

            utils.drop_unique_constraint(migrate_engine, table,
                                         old_name, *(columns))
            UniqueConstraint(*columns, table=t, name=new_name).create()

            if table in constraint_names and migrate_engine.name == "mysql":
                ForeignKeyConstraint(
                    columns=[t.c.instance_uuid],
                    refcolumns=[instances.c.uuid],
                    name=constraint_names[table]
                ).create(engine=migrate_engine)
def define_notifications_table(meta):
    notifications = Table(
        'notifications', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Integer),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('notification_uuid', String(36), nullable=False),
        Column('generated_time', DateTime, nullable=False),
        Column('source_host_uuid', String(36), nullable=False),
        Column('type', String(length=36), nullable=False),
        Column('payload', Text),
        Column('status',
               Enum('new', 'running', 'error', 'failed', 'ignored',
                    'finished', name='notification_status'),
               nullable=False),
        UniqueConstraint('notification_uuid', name='uniq_notifications0uuid'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
        extend_existing=True)

    return notifications
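# NOTE: An assumed usage sketch (not from the original migration): the helper
# above only defines the table, so a migration still has to bind the metadata
# and create it.
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    notifications = define_notifications_table(meta)
    notifications.create(checkfirst=True)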
def test_drop_unique_constraint_in_sqlite_fk_recreate(self):
    engine = self.engines['sqlite']
    meta = MetaData()
    meta.bind = engine
    parent_table = Table(
        'table0', meta,
        Column('id', Integer, primary_key=True),
        Column('foo', Integer),
    )
    parent_table.create()
    table_name = 'table1'
    table = Table(
        table_name, meta,
        Column('id', Integer, primary_key=True),
        Column('baz', Integer),
        Column('bar', Integer, ForeignKey("table0.id")),
        UniqueConstraint('baz', name='constr1'))
    table.create()
    utils.drop_unique_constraint(engine, table_name, 'constr1', 'baz')

    insp = reflection.Inspector.from_engine(engine)
    f_keys = insp.get_foreign_keys(table_name)
    self.assertEqual(len(f_keys), 1)
    f_key = f_keys[0]
    self.assertEqual(f_key['referred_table'], 'table0')
    self.assertEqual(f_key['referred_columns'], ['id'])
    self.assertEqual(f_key['constrained_columns'], ['bar'])
def upgrade(migrate_engine):
    image_members = _get_image_members_table(migrate_engine)

    if migrate_engine.name in ('mysql', 'postgresql'):
        try:
            UniqueConstraint('image_id',
                             name=_get_original_keyname(migrate_engine.name),
                             table=image_members).drop()
        except (OperationalError, ProgrammingError, db_exception.DBError):
            UniqueConstraint('image_id',
                             name=_infer_original_keyname(image_members),
                             table=image_members).drop()
        UniqueConstraint('image_id',
                         'member',
                         'deleted_at',
                         name=NEW_KEYNAME,
                         table=image_members).create()
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    aggregates = Table('aggregates', meta, autoload=True)

    if dialect.startswith('sqlite'):
        aggregates.c.name.alter(unique=False)
    elif dialect.startswith('postgres'):
        ucon = UniqueConstraint('name',
                                name='aggregates_name_key',
                                table=aggregates)
        ucon.drop()
    else:
        col2 = aggregates.c.name
        UniqueConstraint(col2, name='name').drop()
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    datastore_versions = Table('datastore_versions', meta, autoload=True)

    # drop the unique index on the name column - unless we are
    # using sqlite - it doesn't support dropping unique constraints
    uc = None
    if migrate_engine.name == "mysql":
        uc = UniqueConstraint('name', table=datastore_versions, name='name')
    elif migrate_engine.name == "postgresql":
        uc = UniqueConstraint('name', table=datastore_versions,
                              name='datastore_versions_name_key')
    if uc:
        try:
            uc.drop()
        except (OperationalError, InternalError) as e:
            logger.info(e)
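# NOTE: A sketch (assumption, not part of the migration) of the raw SQL the
# two branches above boil down to, which can help when verifying the drop by
# hand: MySQL backs the constraint with a unique index that took the column
# name, while PostgreSQL auto-named it <table>_<column>_key.
def _drop_name_uc_sql_sketch(migrate_engine):
    if migrate_engine.name == "mysql":
        migrate_engine.execute(
            "ALTER TABLE datastore_versions DROP INDEX `name`")
    elif migrate_engine.name == "postgresql":
        migrate_engine.execute(
            "ALTER TABLE datastore_versions "
            "DROP CONSTRAINT datastore_versions_name_key")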
def define_tables(meta):
    credentials = Table(
        'credentials', meta,
        Column('created_at', DateTime(timezone=False), nullable=False),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Integer),
        Column('owner_user_id', String(36)),
        Column('owner_project_id', String(36)),
        Column('id', String(36), nullable=False, primary_key=True),
        Column('name', String(255), nullable=False, primary_key=True),
        Column('value', Text(), nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8')

    credentials_association = Table(
        'credentials_association', meta,
        Column('created_at', DateTime(timezone=False), nullable=False),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Integer),
        Column('owner_user_id', String(36)),
        Column('owner_project_id', String(36)),
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('project_id', String(36), nullable=False),
        Column('credential_id', String(36), ForeignKey('credentials.id'),
               nullable=False),
        UniqueConstraint(
            'project_id', 'deleted',
            name='uniq_credentials_association0project_id0deleted'),
        mysql_engine='InnoDB',
        mysql_charset='utf8')

    return [credentials, credentials_association]
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
                           **col_name_col_instance):
    """Drop unique constraint from table."""
    meta = MetaData()
    meta.bind = migrate_engine
    t = Table(table_name, meta, autoload=True)

    if migrate_engine.name == "sqlite":
        override_cols = [
            _get_not_supported_column(col_name_col_instance, col.name)
            for col in t.columns
            if isinstance(col.type, NullType)
        ]
        for col in override_cols:
            t.columns.replace(col)

    uc = UniqueConstraint(*columns, table=t, name=uc_name)
    uc.drop()
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        services.create()
    except Exception:
        LOG.info(repr(services))
        LOG.exception(_('Exception while creating services table.'))
        raise

    UniqueConstraint('host', 'topic', 'deleted',
                     table=services,
                     name='uniq_services0host0topic0deleted').create()
    UniqueConstraint('host', 'binary', 'deleted',
                     table=services,
                     name='uniq_services0host0binary0deleted').create()
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    instance_info_caches = Table('instance_info_caches', meta, autoload=True)

    if migrate_engine.name == "mysql":
        UniqueConstraint('instance_uuid', table=instance_info_caches,
                         name=OLD_MYSQL_NAME).create()
        UniqueConstraint('instance_uuid', table=instance_info_caches,
                         name=NEW_MYSQL_NAME).drop()

    if migrate_engine.name == "postgresql":
        UniqueConstraint('instance_uuid', table=instance_info_caches,
                         name=OLD_PG_NAME).create()
        UniqueConstraint('instance_uuid', table=instance_info_caches,
                         name=NEW_PG_NAME).drop()
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # add personality and resource to service_parameter table
    service_parameter = Table('service_parameter', meta,
                              Column('id', Integer, primary_key=True,
                                     nullable=False),
                              mysql_engine=ENGINE, mysql_charset=CHARSET,
                              autoload=True)
    service_parameter.create_column(Column('personality', String(255)))
    service_parameter.create_column(Column('resource', String(255)))

    # Remove the existing unique constraint to add a unique constraint
    # with personality and resource.
    UniqueConstraint('service', 'section', 'name', table=service_parameter,
                     name='u_servicesectionname').drop()
    UniqueConstraint('service', 'section', 'name', 'personality', 'resource',
                     table=service_parameter,
                     name='u_service_section_name_personality_resource'
                     ).create()
def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    key_pairs = Table(TABLE_NAME, meta, autoload=True)

    utils.drop_old_duplicate_entries_from_table(migrate_engine, TABLE_NAME,
                                                True, *UC_COLUMNS)

    old_idx = None
    # Drop the old index because the new UniqueConstraint can be used
    # instead.
    for index in key_pairs.indexes:
        if index.name == OLD_IDX_NAME:
            index.drop()
            old_idx = index

    # index.drop() in SQLAlchemy-migrate will issue a DROP INDEX statement to
    # the DB but WILL NOT update the table metadata to remove the `Index`
    # object. This can cause subsequent calls like drop or create constraint
    # on that table to fail. The solution is to update the table metadata to
    # reflect the now-dropped index.
    if old_idx:
        key_pairs.indexes.remove(old_idx)

    uc = UniqueConstraint(*(UC_COLUMNS), table=key_pairs, name=UC_NAME)
    uc.create()
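# NOTE: An alternative sketch (assumption, not the original code) for the same
# stale-metadata problem: instead of pruning the dropped Index object from
# key_pairs.indexes, re-reflect the table into fresh metadata after the DROP
# INDEX so the constraint creation sees the real database state.
meta = MetaData(bind=migrate_engine)
key_pairs = Table(TABLE_NAME, meta, autoload=True)
uc = UniqueConstraint(*UC_COLUMNS, table=key_pairs, name=UC_NAME)
uc.create()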
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # NOTE(dprince): Rename the unique key constraints for both MySQL
    # and PostgreSQL so they reflect the most recent UUID conversions
    # from Folsom.
    instance_info_caches = Table('instance_info_caches', meta, autoload=True)

    if migrate_engine.name == "mysql":
        UniqueConstraint('instance_uuid', table=instance_info_caches,
                         name=NEW_MYSQL_NAME).create()
        UniqueConstraint('instance_uuid', table=instance_info_caches,
                         name=OLD_MYSQL_NAME).drop()

    if migrate_engine.name == "postgresql":
        UniqueConstraint('instance_uuid', table=instance_info_caches,
                         name=NEW_PG_NAME).create()
        UniqueConstraint('instance_uuid', table=instance_info_caches,
                         name=OLD_PG_NAME).drop()
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    partition = Table('partition', meta, autoload=True)

    # Add the 'device_node' column to the partition table.
    partition.create_column(Column('device_node', String(64)))

    # Add unique constraint for a partition's device path.
    UniqueConstraint('device_path', 'forihostid', table=partition,
                     name='u_partition_path_host_id').create()
def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)

    conductor = Table(
        "conductors", meta,
        Column("id", Integer, primary_key=True, nullable=False),
        Column("hostname", String(length=255), nullable=False),
        Column("drivers", Text),
        Column("created_at", DateTime),
        Column("updated_at", DateTime),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )

    try:
        conductor.create()
    except Exception:
        LOG.info(repr(conductor))
        LOG.exception(_("Exception while creating table."))
        raise

    uc = UniqueConstraint("hostname", table=conductor,
                          name="uniq_conductors0hostname")
    uc.create()
def upgrade(migrate_engine):
    """This database upgrade removes unused attributes from the pci_devices
    and device_labels tables.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    pci_devices = Table('pci_devices', meta, autoload=True)
    pci_devices.drop_column(Column('status'))
    pci_devices.drop_column(Column('needs_firmware_update'))

    device_labels = Table('device_labels', meta, autoload=True)
    device_labels.drop_column(Column('fpgadevice_id'))
    UniqueConstraint('pcidevice_id', 'label_key', table=device_labels,
                     name='u_pcidevice_id@label_key').drop()

    return True
def _uc_rename(migrate_engine, upgrade=True):
    UC_DATA.update(UC_SPEC_DB_DATA[migrate_engine.name])

    meta = MetaData(bind=migrate_engine)

    for table in UC_DATA:
        t = Table(table, meta, autoload=True)

        for columns, old_uc_name in UC_DATA[table]:
            new_uc_name = "uniq_{0}0{1}".format(table, "0".join(columns))

            if table in constraint_names and migrate_engine.name == "mysql":
                instances = Table("instances", meta, autoload=True)

                ForeignKeyConstraint(
                    columns=[t.c.instance_uuid],
                    refcolumns=[instances.c.uuid],
                    name=constraint_names[table]).drop(engine=migrate_engine)

            if upgrade:
                old_name, new_name = old_uc_name, new_uc_name
            else:
                old_name, new_name = new_uc_name, old_uc_name

            utils.drop_unique_constraint(migrate_engine, table,
                                         old_name, *(columns))
            if (new_name != 'virtual_interfaces_instance_uuid_fkey' or
                    migrate_engine.name != "mysql"):
                # NOTE(jhesketh): The virtual_interfaces_instance_uuid_fkey
                # key always existed in the table, we don't need to create
                # a unique constraint. See bug/1207344
                UniqueConstraint(*columns, table=t, name=new_name).create()

            if table in constraint_names and migrate_engine.name == "mysql":
                ForeignKeyConstraint(
                    columns=[t.c.instance_uuid],
                    refcolumns=[instances.c.uuid],
                    name=constraint_names[table]).create(
                        engine=migrate_engine)
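# NOTE: A sketch of the module-level data the helper above expects
# (illustrative values only, not the migration's real contents): UC_DATA maps
# a table name to (columns, old_constraint_name) pairs, UC_SPEC_DB_DATA adds
# per-dialect entries, and constraint_names lists tables whose instance_uuid
# foreign key has to be dropped and re-created around the rename on MySQL.
UC_DATA = {
    'instance_info_caches': [(('instance_uuid',), 'instance_uuid')],
}
UC_SPEC_DB_DATA = {
    'mysql': {},
    'postgresql': {},
    'sqlite': {},
}
constraint_names = {
    'instance_info_caches': 'instance_info_caches_ibfk_1',
}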
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    image_sync = Table('imge_sync', meta,
                       Column('created_at', DateTime),
                       Column('updated_at', DateTime),
                       Column('deleted_at', DateTime),
                       Column('deleted', Integer),
                       Column('id', Integer, primary_key=True),
                       Column('image_id', String(36), nullable=False),
                       Column('project_id', String(255)),
                       Column('volume_id', String(36)),
                       Column('status', String(36)),
                       Index('image_id_deleted_idx', 'image_id', 'deleted'),
                       Index('image_id_az_deleted_idx', 'image_id',
                             'project_id', 'deleted'),
                       UniqueConstraint('image_id', 'project_id', 'deleted'),
                       mysql_engine='InnoDB',
                       mysql_charset='utf8')
    image_sync.create()
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    datastore_versions = Table('datastore_versions', meta, autoload=True)

    # drop the unique index on the name column - unless we are
    # using sqlite - it doesn't support dropping unique constraints
    uc = None
    if migrate_engine.name == "mysql":
        uc = UniqueConstraint('name', table=datastore_versions, name='name')
    elif migrate_engine.name == "postgresql":
        uc = UniqueConstraint('name', table=datastore_versions,
                              name='datastore_versions_name_key')
    if uc:
        try:
            uc.drop()
        except OperationalError as e:
            logger.info(e)
def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)

    domain_quota = Table('domain_quotas', meta,
                         Column('id', Integer, primary_key=True,
                                nullable=False),
                         Column('created_at', DateTime),
                         Column('updated_at', DateTime),
                         Column('deleted_at', DateTime),
                         Column('deleted', Integer),
                         Column('domain_id', String(255)),
                         Column('resource', String(255), nullable=False),
                         Column('hard_limit', Integer()),
                         mysql_engine='InnoDB',
                         mysql_charset='utf8')

    domain_quota_usage = Table('domain_quota_usages', meta,
                               Column('id', Integer, primary_key=True,
                                      nullable=False),
                               Column('created_at', DateTime),
                               Column('updated_at', DateTime),
                               Column('deleted_at', DateTime),
                               Column('deleted', Integer),
                               Column('domain_id', String(255)),
                               Column('resource', String(255),
                                      nullable=False),
                               Column('in_use', Integer, nullable=False),
                               Column('reserved', Integer, nullable=False),
                               Column('until_refresh', Integer),
                               mysql_engine='InnoDB',
                               mysql_charset='utf8')

    domain_reservation = Table('domain_reservations', meta,
                               Column('id', Integer, primary_key=True,
                                      nullable=False),
                               Column('created_at', DateTime),
                               Column('updated_at', DateTime),
                               Column('deleted_at', DateTime),
                               Column('deleted', Integer),
                               Column('uuid', String(length=36),
                                      nullable=False),
                               Column('domain_id', String(255)),
                               Column('usage_id', Integer, nullable=False),
                               Column('resource', String(length=255)),
                               Column('delta', Integer, nullable=False),
                               Column('expire', DateTime),
                               mysql_engine='InnoDB',
                               mysql_charset='utf8')

    tables = [domain_quota, domain_quota_usage, domain_reservation]
    for table in tables:
        try:
            table.create()
            utils.create_shadow_table(migrate_engine, table=table)
        except Exception:
            LOG.info(repr(table))
            LOG.exception(_('Exception while creating table.'))
            raise

    indexes = [
        Index('domain_quotas_domain_id_deleted_idx', 'domain_id', 'deleted'),
        # DomainQuotaUsages
        Index('ix_domain_quota_usages_domain_id', 'domain_id'),
        # DomainReservation
        Index('ix_domain_reservations_id', 'domain_id'),
        Index('domain_reservations_uuid_idx', 'uuid'),
    ]

    # Common indexes (creation is currently disabled).
    # for index in indexes:
    #     index.create(migrate_engine)

    fkeys = [
        [[domain_reservation.c.usage_id],
         [domain_quota_usage.c.id],
         'domain_reservations_ibfk_1'],
    ]
    for fkey_pair in fkeys:
        if migrate_engine.name == 'mysql':
            # For MySQL we name our fkeys explicitly so they match Folsom
            fkey = ForeignKeyConstraint(columns=fkey_pair[0],
                                        refcolumns=fkey_pair[1],
                                        name=fkey_pair[2])
            fkey.create()
        elif migrate_engine.name == 'postgresql':
            # PostgreSQL names things like it wants (correct and compatible!)
            fkey = ForeignKeyConstraint(columns=fkey_pair[0],
                                        refcolumns=fkey_pair[1])
            fkey.create()

    uniq_name = "uniq_domain_quotas0domain_id0resource0deleted"
    uc_domain_quota = UniqueConstraint("domain_id", "resource", "deleted",
                                       table=domain_quota, name=uniq_name)
    uc_domain_quota.create()