def downgrade(migrate_engine):
    """Revert the schema: drop the bitmask column and restore the old PK."""
    metadata = MetaData(bind=migrate_engine)
    obs_table = Table('rave_observation', metadata, autoload=True)
    # Remove the column that the matching upgrade() introduced.
    obs_table.c.valid_fields_bitmask.drop()
    # Drop the (station, date, time, accumulation_period) primary key.
    current_pk = constraint.PrimaryKeyConstraint(
        "station", "date", "time", "accumulation_period", table=obs_table)
    current_pk.drop()
    # Clear the stale primary-key flags so the replacement constraint
    # can be created cleanly.
    for pk_col in obs_table.primary_key:
        pk_col.primary_key = False
    # Recreate the original (station, date, time, type) primary key.
    previous_pk = constraint.PrimaryKeyConstraint(
        "station", "date", "time", "type", table=obs_table)
    previous_pk.create()
def upgrade(migrate_engine):
    """Add valid_fields_bitmask and re-key the PK on accumulation_period."""
    metadata = MetaData(bind=migrate_engine)
    obs_table = Table('rave_observation', metadata, autoload=True)
    # New nullable integer column recording which fields carry valid data.
    bitmask = Column("valid_fields_bitmask", Integer, nullable=True)
    bitmask.create(obs_table)
    # Drop the existing (station, date, time, type) primary key.
    previous_pk = constraint.PrimaryKeyConstraint(
        "station", "date", "time", "type", table=obs_table)
    previous_pk.drop()
    # Clear the stale primary-key flags before creating the replacement.
    for pk_col in obs_table.primary_key:
        pk_col.primary_key = False
    # Create the new (station, date, time, accumulation_period) primary key.
    current_pk = constraint.PrimaryKeyConstraint(
        "station", "date", "time", "accumulation_period", table=obs_table)
    current_pk.create()
def downgrade(migrate_engine):
    """Convert the event table's integer id back to a uuid-string id.

    SQLite gets its own table-rebuild path; other engines add a temporary
    string column, copy the uuid values into it, drop the old id/uuid
    columns, and promote the temporary column to the primary key.
    """
    if migrate_engine.name == 'sqlite':
        downgrade_sqlite(migrate_engine)
        return
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    event_table = sqlalchemy.Table('event', meta, autoload=True)
    # BUG FIX: the default was str(uuid.uuid4) — no call — which would have
    # stored the repr of the function object instead of a fresh UUID.
    event_id = sqlalchemy.Column('tmp_id', sqlalchemy.String(length=36),
                                 default=lambda: str(uuid.uuid4()))
    event_id.create(event_table)
    # Preserve each row's uuid value in the temporary column.
    event_list = event_table.select().execute()
    for event in event_list:
        values = {'tmp_id': event.uuid}
        update = event_table.update().where(
            event_table.c.uuid == event.uuid).values(values)
        migrate_engine.execute(update)
    event_table.c.id.drop()
    event_table.c.uuid.drop()
    cons = constraint.PrimaryKeyConstraint('tmp_id', table=event_table)
    cons.create()
    # Same BUG FIX applies to the default on the renamed column.
    event_table.c.tmp_id.alter('id', default=lambda: str(uuid.uuid4()))
    if migrate_engine.name == 'postgresql':
        # Drop the sequence the upgrade created for server-side ids.
        sequence = sqlalchemy.Sequence('evt')
        sqlalchemy.schema.DropSequence(sequence,
                                       bind=migrate_engine).execute()
def visit_column(self, column):
    """Create a column (table already exists).

    Emits ``ALTER TABLE ... ADD <column spec>`` and then creates any
    index, unique, foreign-key, or primary-key constraints attached to
    the column.

    :param column: column object
    :type column: :class:`sqlalchemy.Column` instance
    """
    # Process the column default (e.g. sequence/server default) first so
    # the generated column specification can reference it.
    if column.default is not None:
        self.traverse_single(column.default)

    # start_alter_table() emits the "ALTER TABLE <name>" prefix.
    # IDIOM FIX: its return value was bound to an unused local ``table``;
    # the dead assignment has been removed (the call's side effect kept).
    self.start_alter_table(column)
    self.append("ADD ")
    self.append(self.get_column_specification(column))

    for cons in column.constraints:
        self.traverse_single(cons)
    self.execute()  # run the accumulated ALTER TABLE statement

    # Add indexes and unique constraints (mutually exclusive here).
    if column.index_name:
        Index(column.index_name, column).create()
    elif column.unique_name:
        constraint.UniqueConstraint(column,
                                    name=column.unique_name).create()

    # SA bounds FK constraints to table, add manually.
    for fk in column.foreign_keys:
        self.add_foreignkey(fk.constraint)

    # Add primary key constraint if needed.
    if column.primary_key_name:
        cons = constraint.PrimaryKeyConstraint(
            column, name=column.primary_key_name)
        cons.create()
def visit_column(self, column):
    """Add *column* to an existing table (DB2 dialect).

    DB2 will not accept ``ADD COLUMN ... NOT NULL`` directly, so a
    non-nullable column is first added as nullable, then altered to
    ``SET NOT NULL``, followed by the mandatory table REORG that DB2
    requires after such an alteration.
    """
    # Remember the requested nullability, then force the column nullable
    # for the initial ADD COLUMN statement.
    nullable = True
    if not column.nullable:
        nullable = False
        column.nullable = True
    table = self.start_alter_table(column)
    self.append("ADD COLUMN ")
    self.append(self.get_column_specification(column))

    for cons in column.constraints:
        self.traverse_single(cons)
    if column.default is not None:
        self.traverse_single(column.default)
    self.execute()  # run the accumulated ALTER TABLE statement

    # Restore the requested NOT NULL and reorganize the table, as DB2
    # leaves it in a reorg-pending state after this kind of ALTER.
    if not nullable:
        self.start_alter_table(column)
        self.append("ALTER COLUMN %s SET NOT NULL" %
                    self.preparer.format_column(column))
        self.execute()
        self.append("CALL SYSPROC.ADMIN_CMD('REORG TABLE %s')" %
                    self.preparer.format_table(table))
        self.execute()

    # add indexes and unique constraints
    if column.index_name:
        Index(column.index_name, column).create()
    elif column.unique_name:
        constraint.UniqueConstraint(column,
                                    name=column.unique_name).create()

    # SA bounds FK constraints to table, add manually
    for fk in column.foreign_keys:
        self.add_foreignkey(fk.constraint)

    # add primary key constraint if needed
    if column.primary_key_name:
        pk = constraint.PrimaryKeyConstraint(
            column, name=column.primary_key_name)
        pk.create()
        # Commit and REORG again: creating the PK also leaves the table
        # reorg-pending on DB2.
        self.append("COMMIT")
        self.execute()
        self.append("CALL SYSPROC.ADMIN_CMD('REORG TABLE %s')" %
                    self.preparer.format_table(table))
        self.execute()
def upgrade(migrate_engine):
    """Replace the event table's uuid-string id with an integer id.

    The old string id is preserved in a new ``uuid`` column; a temporary
    integer column is backfilled in creation order and then promoted to
    the primary key. SQLite gets its own table-rebuild path; PostgreSQL
    uses a real sequence for server-generated ids.
    """
    if migrate_engine.name == 'sqlite':
        upgrade_sqlite(migrate_engine)
        return
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    event_table = sqlalchemy.Table('event', meta, autoload=True)
    # BUG FIX: the default was str(uuid.uuid4) — no call — which would have
    # stored the repr of the function object instead of a fresh UUID.
    event_uuid = sqlalchemy.Column('uuid', sqlalchemy.String(length=36),
                                   default=lambda: str(uuid.uuid4()))
    event_table.create_column(event_uuid)
    if migrate_engine.name == 'postgresql':
        sequence = sqlalchemy.Sequence('evt')
        sqlalchemy.schema.CreateSequence(sequence,
                                         bind=migrate_engine).execute()
        event_id = sqlalchemy.Column(
            'tmp_id', sqlalchemy.Integer,
            server_default=sqlalchemy.text("nextval('evt')"))
    else:
        event_id = sqlalchemy.Column('tmp_id', sqlalchemy.Integer)
    event_table.create_column(event_id)

    # Backfill: number rows in creation order, keeping the old id in uuid.
    fake_autoincrement = itertools.count(1)
    event_list = event_table.select().order_by(
        sqlalchemy.sql.expression.asc(
            event_table.c.created_at)).execute().fetchall()
    for event in event_list:
        values = {'tmp_id': fake_autoincrement.next(),
                  'uuid': event.id}
        update = event_table.update().where(
            event_table.c.id == event.id).values(values)
        migrate_engine.execute(update)
    cons = constraint.UniqueConstraint('uuid', table=event_table)
    cons.create()
    event_table.c.id.drop()
    # NOTE: after alter('id', ...) the column is renamed in the database but
    # remains keyed as 'tmp_id' in this metadata, hence the later lookups.
    event_table.c.tmp_id.alter('id', sqlalchemy.Integer)
    cons = constraint.PrimaryKeyConstraint('tmp_id', table=event_table)
    cons.create()
    event_table.c.tmp_id.alter(sqlalchemy.Integer, autoincrement=True)
def downgrade(migrate_engine):
    """Convert the event table's integer id back to a uuid-string id.

    SQLite gets its own table-rebuild path. DB2 (ibm_db_sa) requires the
    future primary-key column to be non-nullable up front, so the
    temporary column and the final alter both force ``nullable=False``
    on that engine.
    """
    if migrate_engine.name == 'sqlite':
        downgrade_sqlite(migrate_engine)
        return
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    event_table = sqlalchemy.Table('event', meta, autoload=True)
    event_id_column_kwargs = {}
    if migrate_engine.name == 'ibm_db_sa':
        event_id_column_kwargs['nullable'] = False
    # BUG FIX: the default was str(uuid.uuid4) — no call — which would have
    # stored the repr of the function object instead of a fresh UUID.
    event_id = sqlalchemy.Column('tmp_id', sqlalchemy.String(length=36),
                                 default=lambda: str(uuid.uuid4()),
                                 **event_id_column_kwargs)
    event_id.create(event_table)
    # Preserve each row's uuid value in the temporary column.
    event_list = event_table.select().execute()
    for event in event_list:
        values = {'tmp_id': event.uuid}
        update = event_table.update().where(
            event_table.c.uuid == event.uuid).values(values)
        migrate_engine.execute(update)
    event_table.c.id.drop()
    event_table.c.uuid.drop()
    cons = constraint.PrimaryKeyConstraint('tmp_id', table=event_table)
    cons.create()
    alter_kwargs = {}
    # NOTE(mriedem): DB2 won't allow a primary key on a nullable column so
    # we have to make it non-nullable.
    if migrate_engine.name == 'ibm_db_sa':
        alter_kwargs['nullable'] = False
    # Same BUG FIX applies to the default on the renamed column.
    event_table.c.tmp_id.alter('id', default=lambda: str(uuid.uuid4()),
                               **alter_kwargs)
    if migrate_engine.name == 'postgresql':
        # Drop the sequence the upgrade created for server-side ids.
        sequence = sqlalchemy.Sequence('evt')
        sqlalchemy.schema.DropSequence(sequence,
                                       bind=migrate_engine).execute()
def _pk_constraint(self, table, column, status):
    """Create a primary key constraint from a table and column.

    :param table: table the constraint belongs to
    :param column: column object, or column name, being added to /
                   removed from the primary key
    :param status: True if the constraint is being added; False if it is
                   being dropped
    :returns: a :class:`constraint.PrimaryKeyConstraint` describing the
              table's primary key after the requested change
    """
    # BUG FIX: the original looked up the undefined name ``name`` here,
    # raising NameError whenever a column-name string was passed.
    if isinstance(column, basestring):
        column = getattr(table.c, column)

    ret = constraint.PrimaryKeyConstraint(*table.primary_key)
    if status:
        # Created PK: the new column joins the constraint.
        ret.c.append(column)
    else:
        # Dropped PK: remove the column from the constraint.
        # BUG FIX: the original iterated ``cons.c`` and indexed by
        # ``col.name`` — both ``cons`` and ``col`` were undefined names.
        names = [c.name for c in ret.c]
        index = names.index(column.name)
        del ret.c[index]
    # NOTE(review): the original ended with
    #   if isinstance(pk, basestring): ret.name = pk
    # but ``pk`` was never defined (guaranteed NameError), so explicit PK
    # naming was unreachable; the dead branch has been removed.
    return ret
def upgrade_resource(migrate_engine):
    """Replace the resource table's string id with an integer id.

    The old id is preserved in a new ``uuid`` column; a temporary
    integer column is backfilled in creation order and then promoted to
    the primary key. PostgreSQL uses a real sequence for server-side
    ids; DB2 (ibm_db_sa) needs non-nullable columns, an explicit unique
    constraint name, and an identity-column workaround.
    """
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    res_table = sqlalchemy.Table('resource', meta, autoload=True)
    res_uuid_column_kwargs = {}
    if migrate_engine.name == 'ibm_db_sa':
        # NOTE(mriedem): DB2 10.5 doesn't support unique constraints over
        # nullable columns, it creates a unique index instead, so we have
        # to make the uuid column non-nullable in the DB2 case.
        res_uuid_column_kwargs['nullable'] = False
    # BUG FIX: the default was str(uuid.uuid4) — no call — which would have
    # stored the repr of the function object instead of a fresh UUID.
    res_uuid = sqlalchemy.Column('uuid', sqlalchemy.String(length=36),
                                 default=lambda: str(uuid.uuid4()),
                                 **res_uuid_column_kwargs)
    res_table.create_column(res_uuid)
    if migrate_engine.name == 'postgresql':
        sequence = sqlalchemy.Sequence('res')
        sqlalchemy.schema.CreateSequence(sequence,
                                         bind=migrate_engine).execute()
        res_id = sqlalchemy.Column(
            'tmp_id', sqlalchemy.Integer,
            server_default=sqlalchemy.text("nextval('res')"))
    else:
        res_id_column_kwargs = {}
        if migrate_engine.name == 'ibm_db_sa':
            # NOTE(mriedem): This is turned into a primary key constraint
            # later so it must be non-nullable.
            res_id_column_kwargs['nullable'] = False
        res_id = sqlalchemy.Column('tmp_id', sqlalchemy.Integer,
                                   **res_id_column_kwargs)
    res_table.create_column(res_id)

    # Backfill: number rows in creation order, keeping the old id in uuid.
    fake_autoincrement = itertools.count(1)
    res_list = res_table.select().order_by(
        sqlalchemy.sql.expression.asc(
            res_table.c.created_at)).execute().fetchall()
    for res in res_list:
        values = {'tmp_id': fake_autoincrement.next(),
                  'uuid': res.id}
        update = res_table.update().where(
            res_table.c.id == res.id).values(values)
        migrate_engine.execute(update)

    constraint_kwargs = {'table': res_table}
    if migrate_engine.name == 'ibm_db_sa':
        # NOTE(mriedem): DB2 gives a random name to the unique constraint
        # if one is not provided so let's set the standard name ourselves.
        constraint_kwargs['name'] = 'uniq_resource0uuid0'
    cons = constraint.UniqueConstraint('uuid', **constraint_kwargs)
    cons.create()
    if migrate_engine.name == 'postgresql':
        # resource_id_seq will be dropped in the case of removing `id` column
        # set owner to none for saving this sequence (it is needed in the
        # earlier migration)
        migrate_engine.execute('alter sequence resource_id_seq owned by none')
    res_table.c.id.drop()
    alter_kwargs = {}
    if migrate_engine.name == 'ibm_db_sa':
        alter_kwargs['nullable'] = False
    # NOTE: after alter('id', ...) the column is renamed in the database but
    # remains keyed as 'tmp_id' in this metadata, hence the later lookups.
    res_table.c.tmp_id.alter('id', sqlalchemy.Integer, **alter_kwargs)
    cons = constraint.PrimaryKeyConstraint('tmp_id', table=res_table)
    cons.create()
    if migrate_engine.name == 'ibm_db_sa':
        # NOTE(chenxiao): For DB2, setting "ID" column "autoincrement=True"
        # can't make sense after above "tmp_id=>id" transformation,
        # so should work around it.
        sql = ("ALTER TABLE RESOURCE ALTER COLUMN ID SET GENERATED BY "
               "DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1)")
        migrate_engine.execute(sql)
    else:
        res_table.c.tmp_id.alter(sqlalchemy.Integer, autoincrement=True)