def _uc_rename(migrate_engine, upgrade=True):
    """Rename unique constraints between legacy names and the
    standardized ``uniq_<table>0<col>0<col>...`` form.

    :param migrate_engine: engine bound to the database being migrated;
        its ``name`` selects the dialect-specific constraint data.
    :param upgrade: when True rename legacy -> standardized; when False
        reverse the direction for a downgrade.
    """
    # Merge the dialect-specific constraint specs into the shared map.
    UC_DATA.update(UC_SPEC_DB_DATA[migrate_engine.name])
    meta = MetaData(bind=migrate_engine)
    for table in UC_DATA:
        t = Table(table, meta, autoload=True)
        for columns, old_uc_name in UC_DATA[table]:
            # Standardized constraint name: uniq_<table>0<col1>0<col2>...
            new_uc_name = "uniq_{0}0{1}".format(table, "0".join(columns))
            # On MySQL a unique index backing a foreign key cannot be
            # dropped while the FK exists, so the FK is dropped here and
            # re-created after the constraint rename below.
            if table in constraint_names and migrate_engine.name == "mysql":
                instances = Table("instances", meta, autoload=True)
                if upgrade and (table == 'instance_info_caches' or
                                table == 'virtual_interfaces'):
                    # NOTE(jhesketh): migration 133_folsom.py accidentally
                    # changed the name of the FK constraint
                    # from instance_info_caches_ibfk_1 to
                    # instance_info_caches_instance_uuid_fkey
                    # meaning databases who have upgraded from
                    # before folsom have the old fkey.
                    # We need to make sure all of the fkeys are
                    # dropped and then add in the correct fkey
                    # regardless. (This also means when 185 is
                    # downgraded the user will keep the correct
                    # fkey as defined in 133).
                    # There also seems to be a case where both
                    # versions of the fkey are present in a
                    # database so we check for each.
                    # Similarly on table virtual_interfaces it
                    # is possible to get into a state of having
                    # both virtual_interfaces_ibfk_1 and
                    # virtual_interfaces_instance_uuid_fkey
                    # present in the virtual_interfaces table.
                    for index_name in \
                            ['instance_info_caches_ibfk_1',
                             'instance_info_caches_instance_uuid_fkey',
                             'virtual_interfaces_ibfk_1',
                             'virtual_interfaces_instance_uuid_fkey']:
                        # Only drop FK names actually reflected on the
                        # table -- either, both, or neither may exist.
                        if index_name in [fk.name for fk in t.foreign_keys]:
                            ForeignKeyConstraint(
                                columns=[t.c.instance_uuid],
                                refcolumns=[instances.c.uuid],
                                name=index_name).drop(engine=migrate_engine)
                else:
                    ForeignKeyConstraint(
                        columns=[t.c.instance_uuid],
                        refcolumns=[instances.c.uuid],
                        name=constraint_names[table]).drop(
                            engine=migrate_engine)
            # Choose rename direction based on upgrade/downgrade.
            if upgrade:
                old_name, new_name = old_uc_name, new_uc_name
            else:
                old_name, new_name = new_uc_name, old_uc_name
            utils.drop_unique_constraint(migrate_engine, table, old_name,
                                         *(columns))
            if (new_name != 'virtual_interfaces_instance_uuid_fkey' or
                    migrate_engine.name != "mysql"):
                # NOTE(jhesketh): The virtual_interfaces_instance_uuid_fkey
                # key always existed in the table, we don't need to create
                # a unique constraint. See bug/1207344
                UniqueConstraint(*columns, table=t, name=new_name).create()
            # Re-create the FK that was dropped above (MySQL only).
            if table in constraint_names and migrate_engine.name == "mysql":
                ForeignKeyConstraint(
                    columns=[t.c.instance_uuid],
                    refcolumns=[instances.c.uuid],
                    name=constraint_names[table]).create(
                        engine=migrate_engine)
def downgrade(migrate_engine):
    """Remove the consistency-group schema additions.

    Drops the ``cgsnapshot_id`` column from ``snapshots`` and the
    ``consistencygroup_id`` column from ``volumes`` (removing the MySQL
    foreign keys that reference them first), then drops the
    ``cgsnapshots`` and ``consistencygroups`` tables.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    # Drop column from snapshots table
    if migrate_engine.name == 'mysql':
        # MySQL cannot drop column cgsnapshot_id until the foreign key
        # constraint is removed. So remove the foreign key first, and
        # then drop the column.
        table = Table('snapshots', meta, autoload=True)
        # BUG FIX: the FK on snapshots.cgsnapshot_id references
        # cgsnapshots.id, but the original autoloaded 'snapshots' as the
        # referenced table.  The drop is issued by constraint name, so
        # this corrects the metadata without changing the emitted DDL.
        ref_table = Table('cgsnapshots', meta, autoload=True)
        params = {'columns': [table.c['cgsnapshot_id']],
                  'refcolumns': [ref_table.c['id']],
                  'name': 'snapshots_ibfk_1'}
        try:
            fkey = ForeignKeyConstraint(**params)
            fkey.drop()
        except Exception:
            # Best-effort: log and continue so the column drop below can
            # still be attempted.
            LOG.error(
                _("Dropping foreign key 'cgsnapshot_id' in "
                  "the 'snapshots' table failed."))

    snapshots = Table('snapshots', meta, autoload=True)
    cgsnapshot_id = snapshots.columns.cgsnapshot_id
    snapshots.drop_column(cgsnapshot_id)

    # Drop column from volumes table
    if migrate_engine.name == 'mysql':
        # MySQL cannot drop column consistencygroup_id until the foreign
        # key constraint is removed. So remove the foreign key first,
        # and then drop the column.
        table = Table('volumes', meta, autoload=True)
        # BUG FIX: the FK references consistencygroups.id, not
        # volumes.id as the original metadata claimed (see note above).
        ref_table = Table('consistencygroups', meta, autoload=True)
        params = {'columns': [table.c['consistencygroup_id']],
                  'refcolumns': [ref_table.c['id']],
                  'name': 'volumes_ibfk_1'}
        try:
            fkey = ForeignKeyConstraint(**params)
            fkey.drop()
        except Exception:
            LOG.error(
                _("Dropping foreign key 'consistencygroup_id' in "
                  "the 'volumes' table failed."))

    volumes = Table('volumes', meta, autoload=True)
    consistencygroup_id = volumes.columns.consistencygroup_id
    volumes.drop_column(consistencygroup_id)

    # Drop the child table first, then the parent it references.
    cgsnapshots = Table('cgsnapshots', meta, autoload=True)
    try:
        cgsnapshots.drop()
    except Exception:
        LOG.error(_("cgsnapshots table not dropped"))
        raise

    consistencygroups = Table('consistencygroups', meta, autoload=True)
    try:
        consistencygroups.drop()
    except Exception:
        LOG.error(_("consistencygroups table not dropped"))
        raise
def downgrade(migrate_engine):
    """Undo this migration: drop the foreign keys, tables, index and
    columns added by upgrade().

    Ordering matters: foreign key constraints are dropped before the
    tables and columns they reference.
    """
    metadata.bind = migrate_engine
    metadata.reflect()
    # NOTE: all new data added in the upgrade method is eliminated here
    # via table drops
    # Drop 1 foreign key constraint from the metadata_file table
    MetadataFile_table = Table("metadata_file", metadata, autoload=True)
    LibraryDatasetDatasetAssociation_table = Table(
        "library_dataset_dataset_association", metadata, autoload=True)
    try:
        cons = ForeignKeyConstraint(
            [MetadataFile_table.c.lda_id],
            [LibraryDatasetDatasetAssociation_table.c.id],
            name='metadata_file_lda_id_fkey')
        # Drop the constraint
        cons.drop()
    except Exception:
        # Best-effort: some backends (e.g. sqlite) never had this FK.
        log.exception(
            "Dropping foreign key constraint 'metadata_file_lda_id_fkey' from table 'metadata_file' failed."
        )
    # Drop 1 foreign key constraint from the history_dataset_association
    # table
    HistoryDatasetAssociation_table = Table("history_dataset_association",
                                            metadata,
                                            autoload=True)
    LibraryDatasetDatasetAssociation_table = Table(
        "library_dataset_dataset_association", metadata, autoload=True)
    try:
        cons = ForeignKeyConstraint(
            [
                HistoryDatasetAssociation_table.c.
                copied_from_library_dataset_dataset_association_id
            ],
            [LibraryDatasetDatasetAssociation_table.c.id],
            name=
            'history_dataset_association_copied_from_library_dataset_da_fkey')
        # Drop the constraint
        cons.drop()
    except Exception:
        log.exception(
            "Dropping foreign key constraint 'history_dataset_association_copied_from_library_dataset_da_fkey' from table 'history_dataset_association' failed."
        )
    # Drop all of the new tables above.  These Table objects are defined
    # at module level; the list is ordered so dependent (child) tables
    # are dropped before the tables they reference.
    TABLES = [
        UserGroupAssociation_table,
        UserRoleAssociation_table,
        GroupRoleAssociation_table,
        Group_table,
        DatasetPermissions_table,
        LibraryPermissions_table,
        LibraryFolderPermissions_table,
        LibraryDatasetPermissions_table,
        LibraryDatasetDatasetAssociationPermissions_table,
        LibraryItemInfoPermissions_table,
        LibraryItemInfoTemplatePermissions_table,
        DefaultUserPermissions_table,
        DefaultHistoryPermissions_table,
        Role_table,
        LibraryDatasetDatasetInfoAssociation_table,
        LibraryDataset_table,
        LibraryDatasetDatasetAssociation_table,
        LibraryDatasetDatasetInfoTemplateAssociation_table,
        JobExternalOutputMetadata_table,
        Library_table,
        LibraryFolder_table,
        LibraryItemInfoTemplateElement_table,
        LibraryInfoTemplateAssociation_table,
        LibraryFolderInfoTemplateAssociation_table,
        LibraryDatasetInfoTemplateAssociation_table,
        LibraryInfoAssociation_table,
        LibraryFolderInfoAssociation_table,
        LibraryDatasetInfoAssociation_table,
        LibraryItemInfoElement_table,
        LibraryItemInfo_table,
        LibraryItemInfoTemplate_table,
    ]
    for table in TABLES:
        drop_table(table)
    # Drop the index on the Job.state column - changeset 2192
    drop_index('ix_job_state', 'job', 'state', metadata)
    # Drop 1 column from the stored_workflow table - changeset 2328
    drop_column('importable', 'stored_workflow', metadata)
    # Drop 1 column from the metadata_file table
    drop_column('lda_id', 'metadata_file', metadata)
    # Drop 1 column from the history_dataset_association table
    # NOTE(review): this call passes the reflected Table object while
    # the calls above pass (table_name, metadata) -- presumably the
    # drop_column helper accepts both signatures; confirm against the
    # shared migration utilities.
    drop_column('copied_from_library_dataset_dataset_association_id',
                HistoryDatasetAssociation_table)
    # Drop 2 columns from the galaxy_user table
    User_table = Table("galaxy_user", metadata, autoload=True)
    drop_column('deleted', User_table)
    drop_column('purged', User_table)
def upgrade(migrate_engine):
    """Apply the security/library schema changes and backfill data.

    Adds columns to galaxy_user, history_dataset_association,
    metadata_file and stored_workflow, creates the new module-level
    tables, wires up two foreign keys, and then -- via raw SQL -- creates
    a private role, default permissions and dataset permissions for every
    existing user.  Ordering matters: metadata.create_all() must run
    before the FK constraints are added, and the role INSERT (pass 1)
    must precede the association/permission INSERTs.
    """
    print(__doc__)
    metadata.bind = migrate_engine
    metadata.reflect()
    # Add 2 new columns to the galaxy_user table
    User_table = Table("galaxy_user", metadata, autoload=True)
    col = Column('deleted', Boolean, index=True, default=False)
    add_column(col, User_table, metadata, index_name='ix_galaxy_user_deleted')
    col = Column('purged', Boolean, index=True, default=False)
    add_column(col, User_table, metadata, index_name='ix_galaxy_user_purged')
    # Add 1 new column to the history_dataset_association table
    HistoryDatasetAssociation_table = Table("history_dataset_association",
                                            metadata,
                                            autoload=True)
    col = Column('copied_from_library_dataset_dataset_association_id',
                 Integer,
                 nullable=True)
    add_column(col, HistoryDatasetAssociation_table, metadata)
    # Add 1 new column to the metadata_file table
    MetadataFile_table = Table("metadata_file", metadata, autoload=True)
    col = Column('lda_id', Integer, index=True, nullable=True)
    add_column(col,
               MetadataFile_table,
               metadata,
               index_name='ix_metadata_file_lda_id')
    # Add 1 new column to the stored_workflow table - changeset 2328
    # (use_alter defers FK creation so the circular workflow reference
    # can be resolved after both tables exist)
    StoredWorkflow_table = Table(
        "stored_workflow",
        metadata,
        Column("latest_workflow_id",
               Integer,
               ForeignKey("workflow.id",
                          use_alter=True,
                          name='stored_workflow_latest_workflow_id_fk'),
               index=True),
        autoload=True,
        extend_existing=True)
    col = Column('importable', Boolean, default=False)
    add_column(col, StoredWorkflow_table, metadata)
    # Create an index on the Job.state column - changeset 2192
    add_index('ix_job_state', 'job', 'state', metadata)
    # Add all of the new tables above
    metadata.create_all()
    # Add 1 foreign key constraint to the history_dataset_association table
    LibraryDatasetDatasetAssociation_table = Table(
        "library_dataset_dataset_association", metadata, autoload=True)
    try:
        cons = ForeignKeyConstraint(
            [
                HistoryDatasetAssociation_table.c.
                copied_from_library_dataset_dataset_association_id
            ],
            [LibraryDatasetDatasetAssociation_table.c.id],
            name=
            'history_dataset_association_copied_from_library_dataset_da_fkey')
        # Create the constraint
        cons.create()
    except Exception:
        log.exception(
            "Adding foreign key constraint 'history_dataset_association_copied_from_library_dataset_da_fkey' to table 'history_dataset_association' failed."
        )
    # Add 1 foreign key constraint to the metadata_file table
    LibraryDatasetDatasetAssociation_table = Table(
        "library_dataset_dataset_association", metadata, autoload=True)
    if migrate_engine.name != 'sqlite':
        # Sqlite can't alter table add foreign key.
        try:
            cons = ForeignKeyConstraint(
                [MetadataFile_table.c.lda_id],
                [LibraryDatasetDatasetAssociation_table.c.id],
                name='metadata_file_lda_id_fkey')
            # Create the constraint
            cons.create()
        except Exception:
            log.exception(
                "Adding foreign key constraint 'metadata_file_lda_id_fkey' to table 'metadata_file' failed."
            )
    # Make sure we have at least 1 user
    cmd = "SELECT * FROM galaxy_user;"
    users = migrate_engine.execute(cmd).fetchall()
    if users:
        cmd = "SELECT * FROM role;"
        roles = migrate_engine.execute(cmd).fetchall()
        # Only backfill when no roles exist yet (fresh upgrade).
        if not roles:
            # Create private roles for each user - pass 1
            cmd = \
                "INSERT INTO role " + \
                "SELECT %s AS id," + \
                "%s AS create_time," + \
                "%s AS update_time," + \
                "email AS name," + \
                "email AS description," + \
                "'private' As type," + \
                "%s AS deleted " + \
                "FROM galaxy_user " + \
                "ORDER BY id;"
            cmd = cmd % (nextval(migrate_engine, 'role'),
                         localtimestamp(migrate_engine),
                         localtimestamp(migrate_engine),
                         engine_false(migrate_engine))
            migrate_engine.execute(cmd)
            # Create private roles for each user - pass 2
            # (dialect-specific string concatenation syntax)
            # NOTE(review): if the dialect is neither postgres-family,
            # sqlite nor mysql, `cmd` still holds the pass-1 INSERT and
            # would be executed a second time -- presumably those are
            # the only supported dialects; confirm.
            if migrate_engine.name in ['postgres', 'postgresql', 'sqlite']:
                cmd = "UPDATE role SET description = 'Private role for ' || description;"
            elif migrate_engine.name == 'mysql':
                cmd = "UPDATE role SET description = CONCAT( 'Private role for ', description );"
            migrate_engine.execute(cmd)
            # Create private roles for each user - pass 3
            # (associate each user with their private role, matched by
            # email == role name from pass 1)
            cmd = \
                "INSERT INTO user_role_association " + \
                "SELECT %s AS id," + \
                "galaxy_user.id AS user_id," + \
                "role.id AS role_id," + \
                "%s AS create_time," + \
                "%s AS update_time " + \
                "FROM galaxy_user, role " + \
                "WHERE galaxy_user.email = role.name " + \
                "ORDER BY galaxy_user.id;"
            cmd = cmd % (nextval(migrate_engine, 'user_role_association'),
                         localtimestamp(migrate_engine),
                         localtimestamp(migrate_engine))
            migrate_engine.execute(cmd)
            # Create default permissions for each user
            cmd = \
                "INSERT INTO default_user_permissions " + \
                "SELECT %s AS id," + \
                "galaxy_user.id AS user_id," + \
                "'manage permissions' AS action," + \
                "user_role_association.role_id AS role_id " + \
                "FROM galaxy_user " + \
                "JOIN user_role_association ON user_role_association.user_id = galaxy_user.id " + \
                "ORDER BY galaxy_user.id;"
            cmd = cmd % nextval(migrate_engine, 'default_user_permissions')
            migrate_engine.execute(cmd)
            # Create default history permissions for each active history
            # associated with a user
            cmd = \
                "INSERT INTO default_history_permissions " + \
                "SELECT %s AS id," + \
                "history.id AS history_id," + \
                "'manage permissions' AS action," + \
                "user_role_association.role_id AS role_id " + \
                "FROM history " + \
                "JOIN user_role_association ON user_role_association.user_id = history.user_id " + \
                "WHERE history.purged = %s AND history.user_id IS NOT NULL;"
            cmd = cmd % (nextval(
                migrate_engine,
                'default_history_permissions'), engine_false(migrate_engine))
            migrate_engine.execute(cmd)
            # Create "manage permissions" dataset_permissions for all
            # activate-able datasets
            cmd = \
                "INSERT INTO dataset_permissions " + \
                "SELECT %s AS id," + \
                "%s AS create_time," + \
                "%s AS update_time," + \
                "'manage permissions' AS action," + \
                "history_dataset_association.dataset_id AS dataset_id," + \
                "user_role_association.role_id AS role_id " + \
                "FROM history " + \
                "JOIN history_dataset_association ON history_dataset_association.history_id = history.id " + \
                "JOIN dataset ON history_dataset_association.dataset_id = dataset.id " + \
                "JOIN user_role_association ON user_role_association.user_id = history.user_id " + \
                "WHERE dataset.purged = %s AND history.user_id IS NOT NULL;"
            cmd = cmd % (nextval(migrate_engine, 'dataset_permissions'),
                         localtimestamp(migrate_engine),
                         localtimestamp(migrate_engine),
                         engine_false(migrate_engine))
            migrate_engine.execute(cmd)
def upgrade(migrate_engine):
    """Convert volume and snapshot id columns from int to varchar.

    Integer surrogate ids are widened to String(36) so they can hold
    UUIDs.  Foreign keys referencing the altered columns are dropped
    first and re-created afterwards, since the backends will not alter a
    column's type while a constraint depends on it.

    (An unused ``dialect`` local was removed; unlike downgrade() there
    is no sqlite early-return here -- presumably intentional, but worth
    confirming.)
    """
    meta = MetaData()
    meta.bind = migrate_engine

    volumes = Table('volumes', meta, autoload=True)
    snapshots = Table('snapshots', meta, autoload=True)
    iscsi_targets = Table('iscsi_targets', meta, autoload=True)
    volume_metadata = Table('volume_metadata', meta, autoload=True)
    sm_volume = Table('sm_volume', meta, autoload=True)
    block_device_mapping = Table('block_device_mapping', meta, autoload=True)

    def _reflected_fkey(column, refcolumn):
        """Return the FK constraint reflected on *column*, or None."""
        fkeys = list(column.foreign_keys)
        if not fkeys:
            return None
        return ForeignKeyConstraint(columns=[column],
                                    refcolumns=[refcolumn],
                                    name=fkeys[0].constraint.name)

    # (column, referenced column) pairs, in the original drop order.
    # The first four are re-created after the type change; the two
    # block_device_mapping pairs are intentionally left off on create.
    fkey_specs = [
        (snapshots.c.volume_id, volumes.c.id),
        (iscsi_targets.c.volume_id, volumes.c.id),
        (volume_metadata.c.volume_id, volumes.c.id),
        (sm_volume.c.id, volumes.c.id),
        (block_device_mapping.c.volume_id, volumes.c.id),
        (block_device_mapping.c.snapshot_id, snapshots.c.id),
    ]

    try:
        for column, refcolumn in fkey_specs:
            fkey = _reflected_fkey(column, refcolumn)
            if fkey is not None:
                fkey.drop()
    except Exception:
        LOG.error(_("Foreign Key constraint couldn't be removed"))
        raise

    volumes.c.id.alter(String(36), primary_key=True)
    volumes.c.snapshot_id.alter(String(36))
    volume_metadata.c.volume_id.alter(String(36), nullable=False)
    snapshots.c.id.alter(String(36), primary_key=True)
    snapshots.c.volume_id.alter(String(36))
    sm_volume.c.id.alter(String(36))
    block_device_mapping.c.volume_id.alter(String(36))
    block_device_mapping.c.snapshot_id.alter(String(36))
    iscsi_targets.c.volume_id.alter(String(36), nullable=True)

    try:
        # NOTE(jdg): We're intentionally leaving off FK's on BDM, so
        # only the first four pairs are restored.
        for column, refcolumn in fkey_specs[:4]:
            fkey = _reflected_fkey(column, refcolumn)
            if fkey is not None:
                fkey.create()
    except Exception:
        # BUG FIX: the original logged "couldn't be removed" here even
        # though this phase re-creates the constraints.
        LOG.error(_("Foreign Key constraint couldn't be created"))
        raise
def downgrade(migrate_engine):
    """Convert volume and snapshot id columns back to int.

    Reverses upgrade(): narrows the String(36) id columns back to
    Integer, dropping and re-creating the dependent foreign keys around
    the type change.  The block_device_mapping FKs (intentionally left
    off by upgrade()) are restored here when reflection still knows
    about them.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    # sqlite cannot ALTER column types in place; nothing to do there.
    if dialect.startswith('sqlite'):
        return

    volumes = Table('volumes', meta, autoload=True)
    snapshots = Table('snapshots', meta, autoload=True)
    iscsi_targets = Table('iscsi_targets', meta, autoload=True)
    volume_metadata = Table('volume_metadata', meta, autoload=True)
    sm_volume = Table('sm_volume', meta, autoload=True)
    block_device_mapping = Table('block_device_mapping', meta, autoload=True)

    def _reflected_fkey(column, refcolumn):
        """Return the FK constraint reflected on *column*, or None."""
        fkeys = list(column.foreign_keys)
        if not fkeys:
            return None
        return ForeignKeyConstraint(columns=[column],
                                    refcolumns=[refcolumn],
                                    name=fkeys[0].constraint.name)

    # FK pairs dropped (and later re-created) around the type change.
    fkey_specs = [
        (snapshots.c.volume_id, volumes.c.id),
        (iscsi_targets.c.volume_id, volumes.c.id),
        (volume_metadata.c.volume_id, volumes.c.id),
        (sm_volume.c.id, volumes.c.id),
    ]
    # BDM FKs were intentionally left off by upgrade(); restored below.
    bdm_specs = [
        (block_device_mapping.c.volume_id, volumes.c.id),
        (block_device_mapping.c.snapshot_id, snapshots.c.id),
    ]

    try:
        for column, refcolumn in fkey_specs:
            fkey = _reflected_fkey(column, refcolumn)
            if fkey is not None:
                fkey.drop()
    except Exception:
        LOG.error(_("Foreign Key constraint couldn't be removed"))
        raise

    volumes.c.id.alter(Integer, primary_key=True, autoincrement=True)
    volumes.c.snapshot_id.alter(Integer)
    volume_metadata.c.volume_id.alter(Integer, nullable=False)
    snapshots.c.id.alter(Integer, primary_key=True, autoincrement=True)
    snapshots.c.volume_id.alter(Integer)
    sm_volume.c.id.alter(Integer)
    block_device_mapping.c.volume_id.alter(Integer)
    block_device_mapping.c.snapshot_id.alter(Integer)
    iscsi_targets.c.volume_id.alter(Integer, nullable=True)

    try:
        for column, refcolumn in fkey_specs:
            fkey = _reflected_fkey(column, refcolumn)
            if fkey is not None:
                fkey.create()
        # NOTE(jdg): Put the BDM foreign keys back in place.
        # BUG FIX: the original called .drop() on these constraints,
        # directly contradicting the comment above; .create() restores
        # them as intended.  (When upgrade() removed them from the
        # database, reflection yields no FK here and this is a no-op.)
        for column, refcolumn in bdm_specs:
            fkey = _reflected_fkey(column, refcolumn)
            if fkey is not None:
                fkey.create()
    except Exception:
        # BUG FIX: message said "removed" while this phase creates.
        LOG.error(_("Foreign Key constraint couldn't be created"))
        raise