def upgrade(migrate_engine):
    """Add a subnet_id FK column to port and backfill it by IP matching.

    Each port's IP address is matched against the IPv4 CIDR of every
    subnet; exactly one subnet must contain the address.  The new column
    is then made NOT NULL and constrained to subnet.id.

    :raises RuntimeError: if a port's IP matches zero or several subnets.
    """
    meta = MetaData(bind=migrate_engine)
    port_table = Table('port', meta, autoload=True)
    subnet_table = Table('subnet', meta, autoload=True)

    subnet_id = Column('subnet_id', Integer)
    port_table.create_column(subnet_id)

    ports = port_table.select().execute()
    subnets = subnet_table.select().execute()
    # Map each subnet's IPv4 network onto its id for containment tests.
    subnets = dict((netaddr.IPNetwork('%s/%s' % (net.ip, net.mask),
                                      version=4), net.id)
                   for net in subnets)
    for port in ports:
        match = [v for k, v in subnets.items()
                 if netaddr.IPAddress(port.ip) in k]
        if len(match) != 1:
            # FIX: the guard also fires on zero matches, but the old
            # message claimed "More than one subnet matches"; report the
            # actual count so both failure modes are diagnosable.
            raise RuntimeError('Expected exactly one subnet to match %s, '
                               'found %d' % (port.ip, len(match)))
        port_table.update().where(port_table.c.id == port.id).\
            values(subnet_id=match[0]).execute()

    port_table.c.subnet_id.alter(nullable=False)
    fkey = ForeignKeyConstraint(columns=[port_table.c.subnet_id],
                                refcolumns=[subnet_table.c.id])
    fkey.create()
def upgrade(migrate_engine):
    """Add the FKs described by TABLES, cleaning up violating rows first.

    For each (table, fk-spec, optional child-spec) entry, rows whose FK
    column references a missing parent are dumped and deleted (children
    first) via the module-level dump_cleanup_rows helper, then the
    constraint is created.
    """
    # sqlite cannot ALTER TABLE ... ADD CONSTRAINT; skip it entirely.
    if migrate_engine.name == 'sqlite':
        return
    meta = MetaData(bind=migrate_engine)
    for table_name, ref, child in TABLES:
        table = Table(table_name, meta, autoload=True)
        column_name, ref_table_name, ref_column_name = ref
        column = table.c[column_name]

        ref_table = Table(ref_table_name, meta, autoload=True)
        ref_column = ref_table.c[ref_column_name]

        # All valid (non-NULL) parent key values.
        subq = select([ref_column]).where(ref_column != None)

        if child:
            # Dump and cleanup rows in child table first
            child_table_name, child_column_name, child_ref_column_name = child

            child_table = Table(child_table_name, meta, autoload=True)
            child_column = child_table.c[child_column_name]
            child_ref_column = table.c[child_ref_column_name]

            # Keys of rows in this table that are themselves orphaned;
            # child rows referencing them must go first.
            child_subq = select([child_ref_column]).where(~ column.in_(subq))
            dump_cleanup_rows(migrate_engine, meta, child_table,
                              child_column.in_(child_subq))

        dump_cleanup_rows(migrate_engine, meta, table, ~ column.in_(subq))

        params = {'columns': [column], 'refcolumns': [ref_column]}
        if migrate_engine.name == 'mysql':
            # MySQL needs an explicit, predictable constraint name.
            params['name'] = "_".join(('fk', table_name, column_name))
        fkey = ForeignKeyConstraint(**params)
        fkey.create()
def downgrade(migrate_engine):
    """Make compute_nodes.service_id NOT NULL again and restore its FK.

    The FK to services.id is only added if it is not already present;
    naming differs per backend to match the Havana-era schema.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    compute_nodes = Table('compute_nodes', meta, autoload=True)
    shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
    services = Table('services', meta, autoload=True)

    # Helper defined elsewhere in this module.
    _correct_sqlite_unique_constraints(migrate_engine, compute_nodes)

    # Make the service_id field not nullable
    # NOTE(sbauza): Beyond the point of this commit, service_id will not be
    # updated, but previous commits still do. We can tho safely go back to
    # a state where all the compute nodes are providing this field.
    compute_nodes.c.service_id.alter(nullable=False)
    shadow_compute_nodes.c.service_id.alter(nullable=False)

    # Adding only FK if not existing yet
    fkeys = {fk.parent.name: fk.column for fk in compute_nodes.foreign_keys}
    if 'service_id' in fkeys and fkeys['service_id'] == services.c.id:
        return

    # NOTE(sbauza): See 216_havana.py for the whole logic
    if migrate_engine.name == 'postgresql':
        # PostgreSQL names things like it wants (correct and compatible!)
        fkey = ForeignKeyConstraint(columns=[compute_nodes.c.service_id],
                                    refcolumns=[services.c.id])
        fkey.create()
    else:
        # For MySQL we name our fkeys explicitly so they match Havana
        fkey = ForeignKeyConstraint(columns=[compute_nodes.c.service_id],
                                    refcolumns=[services.c.id],
                                    name='fk_compute_nodes_service_id')
        fkey.create()
def upgrade(migrate_engine):
    """Extract trait names/types into a new trait_type lookup table.

    trait_type is seeded from the join of trait and unique_name; the
    trait.name_id FK is replaced by trait.trait_type_id, and the now
    redundant trait.t_type column and unique_name table are dropped.
    """
    meta = MetaData(migrate_engine)
    trait_type = Table(
        'trait_type', meta,
        Column('id', Integer, primary_key=True),
        Column('desc', String(255)),
        Column('data_type', Integer),
        UniqueConstraint('desc', 'data_type', name="tt_unique")
    )
    trait = Table('trait', meta, autoload=True)
    unique_name = Table('unique_name', meta, autoload=True)
    trait_type.create(migrate_engine)
    # Trait type extracts data from Trait and Unique name.
    # We take all trait names from Unique Name, and data types
    # from Trait. We then remove dtype and name from trait, and
    # remove the name field.
    conn = migrate_engine.connect()
    sql = ("INSERT INTO trait_type "
           "SELECT unique_name.id, unique_name.key, trait.t_type FROM trait "
           "INNER JOIN unique_name "
           "ON trait.name_id = unique_name.id "
           "GROUP BY unique_name.id, unique_name.key, trait.t_type")
    conn.execute(sql)
    conn.close()
    # Now we need to drop the foreign key constraint, rename
    # the trait.name column, and re-add a new foreign
    # key constraint
    params = {'columns': [trait.c.name_id],
              'refcolumns': [unique_name.c.id]}
    if migrate_engine.name == 'mysql':
        # MySQL's auto-generated name for the existing FK.
        params['name'] = "trait_ibfk_1"
    # foreign key to the unique name table
    fkey = ForeignKeyConstraint(**params)
    fkey.drop()

    Column('trait_type_id', Integer).create(trait)

    # Move data from name_id column into trait_type_id column
    # (migration.paged iterates the result set in batches).
    query = select([trait.c.id, trait.c.name_id])
    for key, value in migration.paged(query):
        trait.update().where(trait.c.id == key)\
            .values({"trait_type_id": value}).execute()

    trait.c.name_id.drop()

    params = {'columns': [trait.c.trait_type_id],
              'refcolumns': [trait_type.c.id]}
    if migrate_engine.name == 'mysql':
        params['name'] = "_".join(('fk', 'trait_type', 'id'))
    fkey = ForeignKeyConstraint(**params)
    fkey.create()

    # Drop the t_type column to data_type.
    trait.c.t_type.drop()

    # Finally, drop the unique_name table - we don't need it
    # anymore.
    unique_name.drop()
def downgrade(migrate_engine):
    """Recreate the INDEXES foreign keys, preserving violating rows.

    Rows that would break each FK are first copied into a
    'dump027_<table>' table and deleted, then the constraint is created.
    """
    if migrate_engine.name == 'sqlite':
        # sqlite cannot add constraints to an existing table.
        return
    meta = MetaData(bind=migrate_engine)
    load_tables = dict((table_name, Table(table_name, meta, autoload=True))
                       for table_name in TABLES)
    for table_name, indexes in INDEXES.items():
        table = load_tables[table_name]

        # Save data that conflicted with FK.
        columns = [column.copy() for column in table.columns]
        table_dump = Table('dump027_' + table_name, meta, *columns)
        table_dump.create()

        for column, ref_table_name, ref_column_name in indexes:
            ref_table = load_tables[ref_table_name]
            subq = select([getattr(ref_table.c, ref_column_name)])
            # Copy orphaned rows into the dump table, then delete them.
            sql = utils.InsertFromSelect(table_dump, table.select().where(
                ~ getattr(table.c, column).in_(subq)))
            sql_del = table.delete().where(
                ~ getattr(table.c, column).in_(subq))
            migrate_engine.execute(sql)
            migrate_engine.execute(sql_del)

            params = {'columns': [table.c[column]],
                      'refcolumns': [ref_table.c[ref_column_name]]}
            if migrate_engine.name == 'mysql':
                # MySQL needs an explicit constraint name.
                params['name'] = "_".join(('fk', table_name, column))
            fkey = ForeignKeyConstraint(**params)
            fkey.create()
def upgrade(migrate_engine):
    """Add galaxy_user.form_values_id and its FK to form_values.id.

    Each step is best-effort: failures are logged, not raised, matching
    this migration series' convention.
    """
    metadata.bind = migrate_engine
    display_migration_details()
    # Load existing tables
    metadata.reflect()
    try:
        User_table = Table( "galaxy_user", metadata, autoload=True )
    except NoSuchTableError:
        User_table = None
        log.debug( "Failed loading table galaxy_user" )
    if User_table is not None:
        try:
            col = Column( "form_values_id", Integer, index=True )
            col.create( User_table, index_name='ix_user_form_values_id')
            assert col is User_table.c.form_values_id
        # FIX: "except Exception, e" is Python-2-only syntax; "as e" works
        # on Python 2.6+ and 3 and matches the newer migrations in this
        # code base.
        except Exception as e:
            log.debug( "Adding column 'form_values_id' to galaxy_user table failed: %s" % ( str( e ) ) )
    try:
        FormValues_table = Table( "form_values", metadata, autoload=True )
    except NoSuchTableError:
        FormValues_table = None
        log.debug( "Failed loading table form_values" )
    if migrate_engine.name != 'sqlite':
        # Add 1 foreign key constraint to the form_values table
        # (sqlite cannot add an FK to an existing table).
        if User_table is not None and FormValues_table is not None:
            try:
                cons = ForeignKeyConstraint( [User_table.c.form_values_id], [FormValues_table.c.id], name='user_form_values_id_fk' )
                # Create the constraint
                cons.create()
            except Exception as e:
                log.debug( "Adding foreign key constraint 'user_form_values_id_fk' to table 'galaxy_user' failed: %s" % ( str( e ) ) )
def cascade_fkeys(metadata, restore=False):
    """ Sets all fkeys to cascade on update

    With restore=False, every FK's original onupdate value is remembered
    in the function attribute ``cascade_fkeys.fkey_onupdate_restore`` and
    the FK is recreated with onupdate="CASCADE".  With restore=True, each
    remembered FK is recreated with its saved onupdate value.

    NOTE(review): fkey_onupdate_restore is assumed to be initialized
    elsewhere (e.g. ``cascade_fkeys.fkey_onupdate_restore = {}`` at module
    level) -- confirm before calling fresh with restore=False.
    """
    for table_name, table in metadata.tables.items():
        # list() so dropping/recreating constraints can't disturb iteration.
        for fkey in list(table.foreign_keys):
            if restore:
                # Only restore FKs we remembered on the forward pass.
                if fkey.constraint.name in cascade_fkeys.fkey_onupdate_restore:
                    onupdate = cascade_fkeys.fkey_onupdate_restore[
                        fkey.constraint.name]
                else:
                    continue
            else:
                # Remember the original onupdate so it can be restored.
                cascade_fkeys.fkey_onupdate_restore[fkey.constraint.name] = \
                    fkey.constraint.onupdate
                onupdate = "CASCADE"

            params = {
                'columns': fkey.constraint.columns,
                'refcolumns': [fkey.column],
                'name': fkey.constraint.name,
                'onupdate': fkey.constraint.onupdate,
                'ondelete': fkey.constraint.ondelete,
                'deferrable': fkey.constraint.deferrable,
                'initially': fkey.constraint.initially,
                'table': table
            }
            # Drop the existing constraint, then recreate it with the
            # desired onupdate behavior.
            fkey_constraint = ForeignKeyConstraint(**params)
            fkey_constraint.drop()

            params['onupdate'] = onupdate
            fkey_constraint = ForeignKeyConstraint(**params)
            fkey_constraint.create()
def upgrade(migrate_engine):
    """Add request.folder_id and its FK to library_folder.id.

    Each step is best-effort: failures are logged, not raised, matching
    this migration series' convention.
    """
    metadata.bind = migrate_engine
    display_migration_details()
    # Load existing tables
    metadata.reflect()
    # Create the folder_id column
    try:
        Request_table = Table( "request", metadata, autoload=True )
    except NoSuchTableError:
        Request_table = None
        log.debug( "Failed loading table request" )
    if Request_table is not None:
        try:
            col = Column( "folder_id", Integer, index=True )
            col.create( Request_table, index_name='ix_request_folder_id')
            assert col is Request_table.c.folder_id
        # FIX: "except Exception, e" is Python-2-only syntax; "as e" works
        # on Python 2.6+ and 3 and matches the newer migrations in this
        # code base.
        except Exception as e:
            log.debug( "Adding column 'folder_id' to request table failed: %s" % ( str( e ) ) )
    try:
        LibraryFolder_table = Table( "library_folder", metadata, autoload=True )
    except NoSuchTableError:
        LibraryFolder_table = None
        log.debug( "Failed loading table library_folder" )
    # Add 1 foreign key constraint to the library_folder table
    # (sqlite cannot add an FK to an existing table).
    if migrate_engine.name != 'sqlite' and Request_table is not None and LibraryFolder_table is not None:
        try:
            cons = ForeignKeyConstraint( [Request_table.c.folder_id], [LibraryFolder_table.c.id], name='request_folder_id_fk' )
            # Create the constraint
            cons.create()
        except Exception as e:
            log.debug( "Adding foreign key constraint 'request_folder_id_fk' to table 'library_folder' failed: %s" % ( str( e ) ) )
def downgrade(migrate_engine):
    """Rebuild dns_domains with mysql_charset='latin1' via copy-and-rename.

    The existing table is renamed to dns_domains_old, a fresh table is
    created, all rows are copied across, and the old table is dropped.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    dns_domains_old = Table('dns_domains', meta, autoload=True)
    dns_domains_old.rename(name='dns_domains_old')

    # NOTE(dprince): manually remove pkey/fkey for postgres
    if migrate_engine.name == "postgresql":
        sql = """ALTER TABLE ONLY dns_domains_old
                 DROP CONSTRAINT dns_domains_pkey;
                 ALTER TABLE ONLY dns_domains_old
                 DROP CONSTRAINT dns_domains_project_id_fkey;"""
        migrate_engine.execute(sql)

    # Bind new metadata to avoid issues after the rename
    meta = MetaData()
    meta.bind = migrate_engine

    dns_domains_new = Table('dns_domains', meta,
                            Column('created_at', DateTime),
                            Column('updated_at', DateTime),
                            Column('deleted_at', DateTime),
                            Column('deleted', Boolean),
                            Column('domain', String(length=512),
                                   primary_key=True, nullable=False),
                            Column('scope', String(length=255)),
                            Column('availability_zone', String(length=255)),
                            Column('project_id', String(length=255)),
                            mysql_engine='InnoDB',
                            mysql_charset='latin1',
                            )
    dns_domains_new.create()

    # Copy every row from the renamed table into the fresh one.
    dns_domains_old = Table('dns_domains_old', meta, autoload=True)
    record_list = list(dns_domains_old.select().execute())
    for rec in record_list:
        row = dns_domains_new.insert()
        row.execute({'created_at': rec['created_at'],
                     'updated_at': rec['updated_at'],
                     'deleted_at': rec['deleted_at'],
                     'deleted': rec['deleted'],
                     'domain': rec['domain'],
                     'scope': rec['scope'],
                     'availability_zone': rec['availability_zone'],
                     'project_id': rec['project_id'],
                     })

    dns_domains_old.drop()

    # NOTE(dprince): We can't easily add the MySQL Fkey on the downgrade
    # because projects is 'utf8' where dns_domains is 'latin1'.
    if migrate_engine.name != "mysql":
        projects = Table('projects', meta, autoload=True)
        fkey = ForeignKeyConstraint(columns=[dns_domains_new.c.project_id],
                                    refcolumns=[projects.c.id])
        fkey.create()
def upgrade(migrate_engine):
    """Split event names out of unique_name into a new event_type table.

    event_type is seeded from the distinct unique_name rows referenced by
    events; event.unique_name_id is then replaced by event.event_type_id
    (FK to event_type.id) and the migrated unique_name rows are deleted.
    """
    meta = MetaData(bind=migrate_engine)
    event_type = Table(
        'event_type', meta,
        Column('id', Integer, primary_key=True),
        Column('desc', String(255), unique=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )
    event_type.create()
    event = Table('event', meta, autoload=True)
    unique_name = Table('unique_name', meta, autoload=True)
    # Event type is a specialization of Unique name, so
    # we insert into the event_type table all the distinct
    # unique names from the event.unique_name field along
    # with the key from the unique_name table, and
    # then rename the event.unique_name field to event.event_type
    conn = migrate_engine.connect()
    sql = ("INSERT INTO event_type "
           "SELECT unique_name.id, unique_name.key FROM event "
           "INNER JOIN unique_name "
           "ON event.unique_name_id = unique_name.id "
           "GROUP BY unique_name.id")
    conn.execute(sql)
    conn.close()
    # Now we need to drop the foreign key constraint, rename
    # the event.unique_name column, and re-add a new foreign
    # key constraint
    params = {'columns': [event.c.unique_name_id],
              'refcolumns': [unique_name.c.id]}
    if migrate_engine.name == 'mysql':
        # MySQL's auto-generated name for the existing FK.
        params['name'] = "event_ibfk_1"
    fkey = ForeignKeyConstraint(**params)
    fkey.drop()

    Column('event_type_id', Integer).create(event)

    # Move data from unique_name_id column into event_type_id column
    # and delete the entry from the unique_name table
    # (migration.paged iterates the result set in batches).
    query = select([event.c.id, event.c.unique_name_id])
    for key, value in migration.paged(query):
        event.update().where(event.c.id == key)\
            .values({"event_type_id": value}).execute()
        # NOTE(review): this deletes unique_name rows keyed on the EVENT
        # id ('key'), not the unique_name id ('value') -- since event_type
        # ids were seeded from unique_name ids, 'value' looks like the
        # intended key; confirm against the original migration history.
        unique_name.delete()\
            .where(unique_name.c.id == key).execute()

    params = {'columns': [event.c.event_type_id],
              'refcolumns': [event_type.c.id]}
    if migrate_engine.name == 'mysql':
        params['name'] = "_".join(('fk', 'event_type', 'id'))
    fkey = ForeignKeyConstraint(**params)
    fkey.create()

    event.c.unique_name_id.drop()
def downgrade(migrate_engine):
    """Convert volume_type from UUID back to int.

    FKs on the affected columns are dropped first (best-effort on
    sqlite), UUID ids are renumbered sequentially from 1, the columns are
    altered back to Integer, and the FKs are recreated.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    volumes = Table("volumes", meta, autoload=True)
    volume_types = Table("volume_types", meta, autoload=True)
    extra_specs = Table("volume_type_extra_specs", meta, autoload=True)

    # Columns whose FKs must be dropped before the type can be altered.
    fkey_remove_list = [volumes.c.volume_type_id,
                        volume_types.c.id,
                        extra_specs.c.volume_type_id]

    for column in fkey_remove_list:
        fkeys = list(column.foreign_keys)
        if fkeys:
            fkey_name = fkeys[0].constraint.name
            fkey = ForeignKeyConstraint(columns=[column],
                                        refcolumns=[volume_types.c.id],
                                        name=fkey_name)
            try:
                fkey.drop()
            except Exception:
                # sqlite cannot drop constraints; best-effort there.
                if migrate_engine.url.get_dialect().name.startswith("sqlite"):
                    pass
                else:
                    raise

    vtype_list = list(volume_types.select().execute())
    new_id = 1

    for t in vtype_list:
        # Renumber each UUID id back to a sequential integer, updating
        # all referencing tables in step.
        volumes.update().\
            where(volumes.c.volume_type_id == t["id"]).\
            values(volume_type_id=new_id).execute()

        extra_specs.update().\
            where(extra_specs.c.volume_type_id == t["id"]).\
            values(volume_type_id=new_id).execute()

        volume_types.update().\
            where(volume_types.c.id == t["id"]).\
            values(id=new_id).execute()

        new_id += 1

    volumes.c.volume_type_id.alter(Integer)
    volume_types.c.id.alter(Integer)
    extra_specs.c.volume_type_id.alter(Integer)

    for column in fkey_remove_list:
        fkeys = list(column.foreign_keys)
        if fkeys:
            fkey_name = fkeys[0].constraint.name
            fkey = ForeignKeyConstraint(columns=[column],
                                        refcolumns=[volume_types.c.id],
                                        name=fkey_name)
            try:
                fkey.create()
                # FIX: pass the argument lazily to the logger instead of
                # eagerly formatting with '%' -- the message is then only
                # built if this level is actually emitted.
                LOG.info("Created foreign key %s", fkey_name)
            except Exception:
                if migrate_engine.url.get_dialect().name.startswith("sqlite"):
                    pass
                else:
                    raise
def upgrade(migrate_engine):
    """Constrain storage_pools.primary_storage_group_id to
    storage_groups.id (skipped on sqlite, named explicitly on MySQL).
    """
    if migrate_engine.name == "sqlite":
        # sqlite cannot add a constraint to an existing table.
        return

    metadata = MetaData(bind=migrate_engine)
    pools = Table("storage_pools", metadata, autoload=True)
    groups = Table("storage_groups", metadata, autoload=True)

    kwargs = {
        "columns": [pools.c.primary_storage_group_id],
        "refcolumns": [groups.c.id],
    }
    if migrate_engine.name == "mysql":
        # MySQL requires a stable, explicit constraint name.
        kwargs["name"] = "_".join(("storage_pool",
                                   "primary_storage_group_id", "fkey"))

    ForeignKeyConstraint(**kwargs).create()
def upgrade(migrate_engine):
    """Constrain storage_pools.primary_storage_group_id to
    storage_groups.id (skipped on sqlite, named explicitly on MySQL).
    """
    if migrate_engine.name == 'sqlite':
        # sqlite cannot add a constraint to an existing table.
        return

    metadata = MetaData(bind=migrate_engine)
    pools = Table('storage_pools', metadata, autoload=True)
    groups = Table('storage_groups', metadata, autoload=True)

    kwargs = {
        'columns': [pools.c.primary_storage_group_id],
        'refcolumns': [groups.c.id],
    }
    if migrate_engine.name == 'mysql':
        # MySQL requires a stable, explicit constraint name.
        kwargs['name'] = "_".join(('storage_pools',
                                   'primary_storage_group_ids', 'fkey'))

    ForeignKeyConstraint(**kwargs).create()
def upgrade(migrate_engine):
    """Create job_to_output_library_dataset, add job.library_folder_id
    with an FK to library_folder.id, and index dataset.state.

    Each step is best-effort: failures are logged, not raised.
    """
    metadata.bind = migrate_engine
    display_migration_details()
    # Load existing tables
    metadata.reflect()
    # Create the job_to_output_library_dataset table
    try:
        JobToOutputLibraryDatasetAssociation_table.create()
    except Exception as e:
        print("Creating job_to_output_library_dataset table failed: %s" % str( e ))
        log.debug( "Creating job_to_output_library_dataset table failed: %s" % str( e ) )
    # Create the library_folder_id column
    try:
        Job_table = Table( "job", metadata, autoload=True )
    except NoSuchTableError:
        Job_table = None
        log.debug( "Failed loading table job" )
    if Job_table is not None:
        try:
            col = Column( "library_folder_id", Integer, index=True )
            col.create( Job_table, index_name='ix_job_library_folder_id')
            assert col is Job_table.c.library_folder_id
        except Exception as e:
            log.debug( "Adding column 'library_folder_id' to job table failed: %s" % ( str( e ) ) )
    try:
        LibraryFolder_table = Table( "library_folder", metadata, autoload=True )
    except NoSuchTableError:
        LibraryFolder_table = None
        log.debug( "Failed loading table library_folder" )
    # Add 1 foreign key constraint to the job table
    if migrate_engine.name != 'sqlite':
        # Sqlite can't alter-table-add-foreign-key
        if Job_table is not None and LibraryFolder_table is not None:
            try:
                cons = ForeignKeyConstraint( [Job_table.c.library_folder_id], [LibraryFolder_table.c.id], name='job_library_folder_id_fk' )
                # Create the constraint
                cons.create()
            except Exception as e:
                log.debug( "Adding foreign key constraint 'job_library_folder_id_fk' to table 'library_folder' failed: %s" % ( str( e ) ) )
    # Create the ix_dataset_state index
    try:
        Dataset_table = Table( "dataset", metadata, autoload=True )
    except NoSuchTableError:
        Dataset_table = None
        log.debug( "Failed loading table dataset" )
    # FIX: guard added -- previously Dataset_table.c.state was dereferenced
    # even when the table failed to load (Dataset_table is None), raising
    # AttributeError instead of the intended logged skip.
    if Dataset_table is not None:
        i = Index( "ix_dataset_state", Dataset_table.c.state )
        try:
            i.create()
        except Exception as e:
            print(str(e))
            log.debug( "Adding index 'ix_dataset_state' to dataset table failed: %s" % str( e ) )
def upgrade(migrate_engine):
    """Add request.folder_id (FK to library_folder.id) plus the 'type'
    and 'layout' columns on form_definition.

    Each step is best-effort: failures are logged, not raised.
    """
    metadata.bind = migrate_engine
    print(__doc__)
    # Load existing tables
    metadata.reflect()
    # Create the folder_id column
    try:
        Request_table = Table("request", metadata, autoload=True)
    except NoSuchTableError:
        Request_table = None
        log.debug("Failed loading table request")
    if Request_table is not None:
        try:
            col = Column("folder_id", Integer, index=True)
            col.create(Request_table, index_name='ix_request_folder_id')
            assert col is Request_table.c.folder_id
        except Exception:
            log.exception("Adding column 'folder_id' to request table failed.")
    try:
        LibraryFolder_table = Table("library_folder", metadata, autoload=True)
    except NoSuchTableError:
        LibraryFolder_table = None
        log.debug("Failed loading table library_folder")
    # Add 1 foreign key constraint to the library_folder table
    # (sqlite cannot add an FK to an existing table).
    if migrate_engine.name != 'sqlite' and Request_table is not None and LibraryFolder_table is not None:
        try:
            cons = ForeignKeyConstraint([Request_table.c.folder_id], [LibraryFolder_table.c.id], name='request_folder_id_fk')
            # Create the constraint
            cons.create()
        except Exception:
            log.exception("Adding foreign key constraint 'request_folder_id_fk' to table 'library_folder' failed.")
    # Create the type column in form_definition
    try:
        FormDefinition_table = Table("form_definition", metadata, autoload=True)
    except NoSuchTableError:
        FormDefinition_table = None
        log.debug("Failed loading table form_definition")
    if FormDefinition_table is not None:
        try:
            col = Column("type", TrimmedString(255), index=True)
            col.create(FormDefinition_table, index_name='ix_form_definition_type')
            assert col is FormDefinition_table.c.type
        except Exception:
            log.exception("Adding column 'type' to form_definition table failed.")
        try:
            col = Column("layout", JSONType())
            col.create(FormDefinition_table)
            assert col is FormDefinition_table.c.layout
        except Exception:
            log.exception("Adding column 'layout' to form_definition table failed.")
def upgrade(migrate_engine):
    """Create the module-level tables ``t`` and ``th`` and add a
    selected_arar_id FK column to proc_SelectedHistoriesTable.
    """
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    meta.bind = migrate_engine
    # t and th are Table definitions declared elsewhere in this module.
    t.create()
    th.create()
    tt = Table('proc_SelectedHistoriesTable', meta, autoload=True)
    c = Column('selected_arar_id', Integer)
    c.create(tt)
    # FIX: removed dead commented-out code ("a = tt.c.selected_arar").
    # Constrain the new column to t.id.
    con = ForeignKeyConstraint([c], [t.c.id])
    con.create()
def upgrade(migrate_engine):
    """Create every foreign key described by the module-level INDEXES map."""
    meta = MetaData(bind=migrate_engine)
    # Reflect each table named in TABLES exactly once.
    tables = {name: Table(name, meta, autoload=True) for name in TABLES}

    for table_name, fk_specs in INDEXES.items():
        source = tables[table_name]
        for col_name, target_name, target_col in fk_specs:
            target = tables[target_name]
            kwargs = {'columns': [source.c[col_name]],
                      'refcolumns': [target.c[target_col]]}
            if migrate_engine.name == 'mysql':
                # MySQL needs an explicit, predictable constraint name.
                kwargs['name'] = "_".join(('fk', table_name, col_name))
            ForeignKeyConstraint(**kwargs).create()
def upgrade(migrate_engine):
    """Create the sequencing-request tables (form definitions, requests,
    samples and their state/event tables), wiring the
    form_definition_current -> form_definition FK.

    Each table creation is best-effort: failures are logged, not raised.
    The Table objects are declared elsewhere in this module.
    """
    metadata.bind = migrate_engine
    display_migration_details()
    # Load existing tables
    metadata.reflect()
    # Add all of the new tables above
    # metadata.create_all()
    try:
        FormDefinitionCurrent_table.create()
    except Exception as e:
        log.debug( "Creating form_definition_current table failed: %s" % str( e ) )
    try:
        FormDefinition_table.create()
    except Exception as e:
        log.debug( "Creating form_definition table failed: %s" % str( e ) )
    # Add 1 foreign key constraint to the form_definition_current table
    if FormDefinitionCurrent_table is not None and FormDefinition_table is not None:
        try:
            cons = ForeignKeyConstraint( [FormDefinitionCurrent_table.c.latest_form_id], [FormDefinition_table.c.id], name='form_definition_current_latest_form_id_fk' )
            # Create the constraint
            cons.create()
        except Exception as e:
            log.debug( "Adding foreign key constraint 'form_definition_current_latest_form_id_fk' to table 'form_definition_current' failed: %s" % ( str( e ) ) )
    try:
        FormValues_table.create()
    except Exception as e:
        log.debug( "Creating form_values table failed: %s" % str( e ) )
    try:
        RequestType_table.create()
    except Exception as e:
        log.debug( "Creating request_type table failed: %s" % str( e ) )
    try:
        Request_table.create()
    except Exception as e:
        log.debug( "Creating request table failed: %s" % str( e ) )
    try:
        Sample_table.create()
    except Exception as e:
        log.debug( "Creating sample table failed: %s" % str( e ) )
    try:
        SampleState_table.create()
    except Exception as e:
        log.debug( "Creating sample_state table failed: %s" % str( e ) )
    try:
        SampleEvent_table.create()
    except Exception as e:
        log.debug( "Creating sample_event table failed: %s" % str( e ) )
def upgrade(migrate_engine):
    """(DB2 only) create the foreign keys listed in DB2_FKEYS."""
    if migrate_engine.name == 'ibm_db_sa':
        # create the foreign keys
        metadata = MetaData(bind=migrate_engine)
        for values in DB2_FKEYS:
            # NOTE(mriedem): We have to load all of the tables in the same
            # MetaData object for the ForeignKey object to work, so we just
            # load up the Column objects here as well dynamically.
            params = dict(name=values['name'])
            table = Table(values['table'], metadata, autoload=True)
            params['table'] = table
            params['columns'] = [table.c[col] for col in values['columns']]
            # _get_refcolumns is a helper defined elsewhere in this module.
            params['refcolumns'] = _get_refcolumns(metadata,
                                                   values['refcolumns'])
            fkey = ForeignKeyConstraint(**params)
            fkey.create()
def upgrade(migrate_engine):
    """Delete orphaned rows, then add the FKs described by INDEXES."""
    if migrate_engine.name == "sqlite":
        # sqlite cannot add constraints to an existing table.
        return

    meta = MetaData(bind=migrate_engine)
    # Reflect each table named in TABLES exactly once.
    tables = {name: Table(name, meta, autoload=True) for name in TABLES}

    for table_name, fk_specs in INDEXES.items():
        source = tables[table_name]
        for col_name, target_name, target_col in fk_specs:
            target = tables[target_name]
            valid_keys = select([getattr(target.c, target_col)])

            # Remove rows whose FK value has no parent row, otherwise
            # constraint creation would fail.
            orphan_delete = source.delete().where(
                ~getattr(source.c, col_name).in_(valid_keys))
            migrate_engine.execute(orphan_delete)

            kwargs = {"columns": [source.c[col_name]],
                      "refcolumns": [target.c[target_col]]}
            if migrate_engine.name == "mysql":
                # MySQL needs an explicit, predictable constraint name.
                kwargs["name"] = "_".join(("fk", table_name, col_name))
            ForeignKeyConstraint(**kwargs).create()
def downgrade(migrate_engine):
    """Drop the surrogate encryption_id PK column and restore
    volume_type_id as the primary key with its original FK."""
    meta = MetaData()
    meta.bind = migrate_engine

    encryptions = Table('encryption', meta, autoload=True)

    # Drop the surrogate primary key and its column.
    encryption_id_pk = PrimaryKeyConstraint(encryptions.columns.encryption_id)
    encryption_id_pk.drop()
    encryptions.drop_column(encryptions.columns.encryption_id)

    # volume_type_id becomes the primary key again.
    volume_type_pk = PrimaryKeyConstraint(encryptions.columns.volume_type_id)
    volume_type_pk.create()

    # Recreate the FK under its original MySQL auto-generated name.
    ref_table = Table('volume_types', meta, autoload=True)
    params = {'columns': [encryptions.c['volume_type_id']],
              'refcolumns': [ref_table.c['id']],
              'name': 'encryption_ibfk_1'}
    volume_type_fk = ForeignKeyConstraint(**params)
    volume_type_fk.create()
def downgrade(migrate_engine):
    """Drop the unique constraint, working around MySQL's FK/index rule."""
    if migrate_engine.name == 'mysql':
        # NOTE(jhesketh): MySQL Cannot drop index
        # 'uniq_aggregate_metadata0aggregate_id0key0deleted': needed in a
        # foreign key constraint. So we'll remove the fkey constraint on the
        # aggregate_metadata table and add it back after the index is
        # downgraded.
        meta = MetaData(bind=migrate_engine)
        table = Table('aggregate_metadata', meta, autoload=True)
        ref_table = Table('aggregates', meta, autoload=True)
        params = {'columns': [table.c['aggregate_id']],
                  'refcolumns': [ref_table.c['id']],
                  'name': 'aggregate_metadata_ibfk_1'}
        fkey = ForeignKeyConstraint(**params)
        fkey.drop()

    # TABLE_NAME, UC_NAME and COLUMNS are module-level constants.
    utils.drop_unique_constraint(migrate_engine, TABLE_NAME, UC_NAME,
                                 *COLUMNS)

    if migrate_engine.name == 'mysql':
        # Restore the FK removed above ('fkey' is only bound on mysql,
        # which is exactly when this branch runs).
        fkey.create()
def upgrade(migrate_engine):
    """Create huawei_instance_extra and its shadow table.

    Both tables get an index on instance_uuid; only the non-shadow table
    gets an FK to instances.uuid.  Creation is idempotent: existing
    tables are skipped.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    # (positional-args, keyword-args) pairs describing the new columns.
    columns = [
        (('created_at', DateTime), {}),
        (('updated_at', DateTime), {}),
        (('deleted_at', DateTime), {}),
        (('deleted', Integer), {}),
        (('id', Integer), dict(primary_key=True, nullable=False)),
        (('instance_uuid', String(length=36)), dict(nullable=False)),
        (('vcpu_topology', Text), dict(nullable=True)),
        (('scheduler_hints', Text), dict(nullable=True)),
        (('core_bind', Text), dict(nullable=True)),
        (('request_network', Text), dict(nullable=True)),
        (('stats', Text), dict(nullable=True)),
    ]
    for prefix in ('', 'shadow_'):
        instances = Table(prefix + 'instances', meta, autoload=True)
        basename = prefix + 'huawei_instance_extra'
        # Idempotent: skip tables that already exist.
        if migrate_engine.has_table(basename):
            continue
        _columns = tuple([Column(*args, **kwargs)
                          for args, kwargs in columns])
        table = Table(basename, meta, *_columns, mysql_engine='InnoDB',
                      mysql_charset='utf8')
        table.create()

        # Index
        instance_uuid_index = Index(basename + '_idx',
                                    table.c.instance_uuid)
        instance_uuid_index.create(migrate_engine)

        # Foreign key: only for the real table, not the shadow copy.
        if not prefix:
            fkey_columns = [table.c.instance_uuid]
            fkey_refcolumns = [instances.c.uuid]
            instance_fkey = ForeignKeyConstraint(
                columns=fkey_columns, refcolumns=fkey_refcolumns)
            instance_fkey.create()
def downgrade(migrate_engine):
    """Downgrade the migrations indexes, working around MySQL's FK/index
    rule by temporarily dropping the instance_uuid FK."""
    if migrate_engine.name == 'mysql':
        # NOTE(jhesketh): MySQL Cannot drop index
        # migrations_instance_uuid_and_status_idx needed in a foreign
        # key constraint. So we'll remove the fkey constraint on the
        # aggregate_metadata table and add it back after the indexes are
        # downgraded.
        meta = MetaData(bind=migrate_engine)
        table = Table('migrations', meta, autoload=True)
        ref_table = Table('instances', meta, autoload=True)
        params = {'columns': [table.c['instance_uuid']],
                  'refcolumns': [ref_table.c['uuid']]}
        if migrate_engine.name == 'mysql':
            params['name'] = 'fk_migrations_instance_uuid'
        fkey = ForeignKeyConstraint(**params)
        fkey.drop()

    # 'data' is a module-level index description consumed by utils.
    utils.modify_indexes(migrate_engine, data, upgrade=False)

    if migrate_engine.name == 'mysql':
        # Restore the FK removed above ('fkey' is only bound on mysql,
        # which is exactly when this branch runs).
        fkey.create()
def upgrade(migrate_engine):
    """Add convergence columns to stack plus the prev_raw_template FK."""
    if migrate_engine.name == "sqlite":
        # sqlite cannot alter tables in place; use the rebuild path.
        _upgrade_sqlite(migrate_engine)
        return

    meta = sqlalchemy.MetaData(bind=migrate_engine)
    stack = sqlalchemy.Table("stack", meta, autoload=True)

    # Add the three new columns one at a time.
    new_columns = (
        sqlalchemy.Column("prev_raw_template_id", sqlalchemy.Integer),
        sqlalchemy.Column("current_traversal", sqlalchemy.String(36)),
        sqlalchemy.Column("current_deps", heat_db_types.Json),
    )
    for new_column in new_columns:
        new_column.create(stack)

    raw_template = sqlalchemy.Table("raw_template", meta, autoload=True)
    ForeignKeyConstraint(columns=[stack.c.prev_raw_template_id],
                         refcolumns=[raw_template.c.id],
                         name="prev_raw_template_ref").create()
def upgrade(migrate_engine):
    """Recreate the resource_data -> resource FK with ON DELETE CASCADE
    and the explicit name fk_resource_id."""
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine

    resource_data = sqlalchemy.Table('resource_data', meta, autoload=True)
    resource = sqlalchemy.Table('resource', meta, autoload=True)

    # Find the reflected FK pointing at resource.id, whatever its name.
    for fk in resource_data.foreign_keys:
        if fk.column == resource.c.id:
            # delete the existing fk
            # and create with ondelete cascade and a proper name
            existing_fkey = ForeignKeyConstraint(
                columns=[resource_data.c.resource_id],
                refcolumns=[resource.c.id],
                name=fk.name)
            existing_fkey.drop()

            fkey = ForeignKeyConstraint(
                columns=[resource_data.c.resource_id],
                refcolumns=[resource.c.id],
                name="fk_resource_id",
                ondelete='CASCADE')
            fkey.create()
            # Only one such FK can exist; stop looking.
            break
def downgrade(migrate_engine):
    """Fold event_type back into unique_name and restore the old FK.

    event_type rows are re-inserted into unique_name, the table is
    dropped, and event.event_type_id is replaced by event.unique_name_id.
    """
    meta = MetaData(bind=migrate_engine)
    event_type = Table('event_type', meta, autoload=True)
    event = Table('event', meta, autoload=True)
    unique_name = Table('unique_name', meta, autoload=True)
    # Re-insert the event type table records into the old
    # unique_name table.
    conn = migrate_engine.connect()
    sql = ("INSERT INTO unique_name "
           "SELECT event_type.id, event_type.desc FROM event_type")
    conn.execute(sql)
    conn.close()
    # Drop the foreign key constraint to event_type, drop the
    # event_type table, rename the event.event_type column to
    # event.unique_name, and re-add the old foreign
    # key constraint
    params = {'columns': [event.c.event_type_id],
              'refcolumns': [event_type.c.id]}
    if migrate_engine.name == 'mysql':
        params['name'] = "_".join(('fk', 'event_type', 'id'))
    fkey = ForeignKeyConstraint(**params)
    fkey.drop()

    event_type.drop()

    Column('unique_name_id', Integer).create(event)

    # Move data from event_type_id column to unique_name_id column
    # (migration.paged iterates the result set in batches).
    query = select([event.c.id, event.c.event_type_id])
    for key, value in migration.paged(query):
        event.update().where(event.c.id == key)\
            .values({"unique_name_id": value}).execute()

    event.c.event_type_id.drop()

    params = {'columns': [event.c.unique_name_id],
              'refcolumns': [unique_name.c.id]}
    if migrate_engine.name == 'mysql':
        # MySQL's original auto-generated FK name.
        params['name'] = 'event_ibfk_1'
    fkey = ForeignKeyConstraint(**params)
    fkey.create()
def upgrade(migrate_engine):
    """Create instance_extra and its shadow table.

    Both tables get an index on instance_uuid; only the non-shadow table
    gets an FK to instances.uuid (and not on DB2, see note below).
    Creation is idempotent: existing tables are skipped.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    # (positional-args, keyword-args) pairs describing the new columns.
    columns = [
        (('created_at', DateTime), {}),
        (('updated_at', DateTime), {}),
        (('deleted_at', DateTime), {}),
        (('deleted', Integer), {}),
        (('id', Integer), dict(primary_key=True, nullable=False)),
        (('instance_uuid', String(length=36)), dict(nullable=False)),
        (('numa_topology', Text), dict(nullable=True)),
    ]
    for prefix in ('', 'shadow_'):
        instances = Table(prefix + 'instances', meta, autoload=True)
        basename = prefix + 'instance_extra'
        # Idempotent: skip tables that already exist.
        if migrate_engine.has_table(basename):
            continue
        _columns = tuple([Column(*args, **kwargs)
                          for args, kwargs in columns])
        table = Table(basename, meta, *_columns, mysql_engine='InnoDB',
                      mysql_charset='utf8')
        table.create()

        # Index
        instance_uuid_index = Index(basename + '_idx',
                                    table.c.instance_uuid)
        instance_uuid_index.create(migrate_engine)

        # Foreign key
        # NOTE(mriedem): DB2 won't create the ForeignKey over the
        # instances.uuid column since it doesn't have a UniqueConstraint
        # (added later in the 267 migration). The ForeignKey will be
        # created for DB2 in the 296 migration.
        if not prefix and migrate_engine.name != 'ibm_db_sa':
            fkey_columns = [table.c.instance_uuid]
            fkey_refcolumns = [instances.c.uuid]
            instance_fkey = ForeignKeyConstraint(
                columns=fkey_columns, refcolumns=fkey_refcolumns)
            instance_fkey.create()
def upgrade(migrate_engine):
    """Re-add foreign keys (FKEYS) and unique constraints (UNIQUES) that
    older SQLAlchemy/sqlalchemy-migrate versions may have dropped.

    Both passes are idempotent: a constraint is only created when one
    with the same name is not already present.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    if migrate_engine.name == 'sqlite':
        # SQLite is also missing this one index
        if not utils.index_exists(migrate_engine, 'fixed_ips', 'address'):
            utils.add_index(migrate_engine, 'fixed_ips', 'address',
                            ['address'])

    for src_table, src_column, dst_table, dst_column, name in FKEYS:
        src_table = Table(src_table, meta, autoload=True)
        if name in set(fk.name for fk in src_table.foreign_keys):
            continue
        src_column = src_table.c[src_column]

        dst_table = Table(dst_table, meta, autoload=True)
        dst_column = dst_table.c[dst_column]

        fkey = ForeignKeyConstraint(columns=[src_column],
                                    refcolumns=[dst_column],
                                    name=name)
        fkey.create()

    # SQLAlchemy versions < 1.0.0 don't reflect unique constraints
    # for SQLite correctly causing sqlalchemy-migrate to recreate
    # some tables with missing unique constraints. Re-add some
    # potentially missing unique constraints as a workaround.
    for table_name, name, column_names in UNIQUES:
        table = Table(table_name, meta, autoload=True)
        # FIX: the original filter tested isinstance(table, ...), which is
        # always False for a Table object, so the name set was always
        # empty and existing constraints were recreated unconditionally.
        # Test each constraint object 'c' instead.
        if name in set(c.name for c in table.constraints
                       if isinstance(c, schema.UniqueConstraint)):
            continue

        uc = UniqueConstraint(*column_names, table=table, name=name)
        uc.create()
def upgrade(migrate_engine):
    """Narrow the hosts table's unique constraint to (name, deleted).

    The old constraint covered (failover_segment_id, name, deleted); the
    replacement keeps the same name 'uniq_host0name0deleted' but drops
    failover_segment_id from it.
    """
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    hosts_table = Table('hosts', meta, autoload=True)
    failover_segments = Table('failover_segments', meta, autoload=True)
    # NOTE(Dinesh_Bhor) We need to drop foreign keys first because unique
    # constraints that we want to delete depend on them. So drop the fk and
    # recreate it again after unique constraint deletion.
    cons_fk = ForeignKeyConstraint([hosts_table.c.failover_segment_id],
                                   [failover_segments.c.uuid],
                                   name="fk_failover_segments_uuid")
    cons_fk.drop(engine=migrate_engine)

    # Old three-column constraint being replaced.
    cons_unique = UniqueConstraint('failover_segment_id', 'name', 'deleted',
                                   name='uniq_host0name0deleted',
                                   table=hosts_table)
    cons_unique.drop(engine=migrate_engine)

    # Create an updated unique constraint
    updated_cons_unique = UniqueConstraint('name', 'deleted',
                                           name='uniq_host0name0deleted',
                                           table=hosts_table)

    # NOTE(review): the FK is restored before the new unique constraint is
    # created — presumably safe since the FK no longer depends on the
    # constraint being replaced; confirm against the backend's DDL rules.
    cons_fk.create()
    updated_cons_unique.create()
def upgrade(migrate_engine):
    """Convert volume_type_id to UUID."""
    meta = MetaData()
    meta.bind = migrate_engine

    volumes = Table('volumes', meta, autoload=True)
    volume_types = Table('volume_types', meta, autoload=True)
    extra_specs = Table('volume_type_extra_specs', meta, autoload=True)

    # Columns whose foreign keys must be dropped before the type change
    # and re-created afterwards.
    fkey_remove_list = [volumes.c.volume_type_id,
                        volume_types.c.id,
                        extra_specs.c.volume_type_id]

    for column in fkey_remove_list:
        fkeys = list(column.foreign_keys)
        if fkeys:
            fkey_name = fkeys[0].constraint.name
            fkey = ForeignKeyConstraint(columns=[column],
                                        refcolumns=[volume_types.c.id],
                                        name=fkey_name)
            try:
                fkey.drop()
            except Exception:
                # SQLite cannot drop FKs via ALTER TABLE; tolerate failure
                # there, re-raise everywhere else.
                if migrate_engine.url.get_dialect().name.startswith('sqlite'):
                    pass
                else:
                    raise

    # Widen the id columns so they can hold 36-char UUID strings.
    volumes.c.volume_type_id.alter(String(36))
    volume_types.c.id.alter(String(36))
    extra_specs.c.volume_type_id.alter(String(36))

    vtype_list = list(volume_types.select().execute())
    for t in vtype_list:
        new_id = str(uuid.uuid4())

        # Update referencing rows (volumes, extra_specs) BEFORE rewriting
        # the volume_types.id itself — the old integer id is the join key.
        volumes.update().\
            where(volumes.c.volume_type_id == t['id']).\
            values(volume_type_id=new_id).execute()

        extra_specs.update().\
            where(extra_specs.c.volume_type_id == t['id']).\
            values(volume_type_id=new_id).execute()

        volume_types.update().\
            where(volume_types.c.id == t['id']).\
            values(id=new_id).execute()

    # Restore the foreign keys dropped above.
    for column in fkey_remove_list:
        fkeys = list(column.foreign_keys)
        if fkeys:
            fkey_name = fkeys[0].constraint.name
            fkey = ForeignKeyConstraint(columns=[column],
                                        refcolumns=[volume_types.c.id],
                                        name=fkey_name)
            try:
                fkey.create()
            except Exception:
                # Same SQLite tolerance as on the drop path.
                if migrate_engine.url.get_dialect().name.startswith('sqlite'):
                    pass
                else:
                    raise
def downgrade(migrate_engine):
    """Revert the trait_type table back to the unique_name schema.

    Rebuilds the unique_name table from trait_type, restores the old
    trait.name_id / trait.t_type columns and FK, then drops trait_type.
    """
    meta = MetaData(migrate_engine)
    unique_name = Table(
        'unique_name', meta,
        Column('id', Integer, primary_key=True),
        Column('key', String(255), unique=True)
    )
    trait_type = Table('trait_type', meta, autoload=True)
    trait = Table('trait', meta, autoload=True)

    # Create the UniqueName table, drop the foreign key constraint
    # to trait_type, drop the trait_type table, rename the
    # trait.trait_type column to traitname, re-add the dtype to
    # the trait table, and re-add the old foreign key constraint
    unique_name.create(migrate_engine)

    conn = migrate_engine.connect()
    # Seed unique_name(id, key) from trait_type(id, desc).
    sql = ("INSERT INTO unique_name "
           "SELECT trait_type.id, trait_type.desc "
           "FROM trait_type")
    conn.execute(sql)
    conn.close()

    params = {'columns': [trait.c.trait_type_id],
              'refcolumns': [trait_type.c.id]}
    if migrate_engine.name == 'mysql':
        # MySQL needs an explicit constraint name to drop it.
        params['name'] = "_".join(('fk', 'trait_type', 'id'))
    fkey = ForeignKeyConstraint(**params)
    fkey.drop()

    # Re-create the old columns in trait
    Column("name_id", Integer).create(trait)
    Column("t_type", Integer).create(trait)

    # Copy data from trait_type.data_type into trait.t_type
    query = select([trait_type.c.id, trait_type.c.data_type])
    for key, value in migration.paged(query):
        trait.update().where(trait.c.trait_type_id == key)\
            .values({"t_type": value}).execute()

    # Copy data from the trait_type_id column into the restored name_id
    # column (trait_type_id itself is dropped below).
    query = select([trait.c.id, trait.c.trait_type_id])
    for key, value in migration.paged(query):
        trait.update().where(trait.c.id == key)\
            .values({"name_id": value}).execute()

    # Add a foreign key to the unique_name table
    params = {'columns': [trait.c.name_id],
              'refcolumns': [unique_name.c.id]}
    if migrate_engine.name == 'mysql':
        params['name'] = 'trait_ibfk_1'
    fkey = ForeignKeyConstraint(**params)
    fkey.create()

    trait.c.trait_type_id.drop()

    # Drop the trait_type table. It isn't needed anymore
    trait_type.drop()
def upgrade(migrate_engine):
    """Add user deletion/purge columns, new FKs, and bootstrap private roles.

    Schema part: adds columns to galaxy_user, history_dataset_association,
    metadata_file and stored_workflow, plus two FK constraints. Data part:
    if users exist but no roles do, creates a private role per user plus
    default user/history/dataset permissions via raw SQL.
    """
    print(__doc__)
    metadata.bind = migrate_engine
    metadata.reflect()

    # Add 2 new columns to the galaxy_user table
    User_table = Table("galaxy_user", metadata, autoload=True)
    col = Column('deleted', Boolean, index=True, default=False)
    add_column(col, User_table, metadata, index_name='ix_galaxy_user_deleted')
    col = Column('purged', Boolean, index=True, default=False)
    add_column(col, User_table, metadata, index_name='ix_galaxy_user_purged')

    # Add 1 new column to the history_dataset_association table
    HistoryDatasetAssociation_table = Table("history_dataset_association", metadata, autoload=True)
    col = Column('copied_from_library_dataset_dataset_association_id', Integer, nullable=True)
    add_column(col, HistoryDatasetAssociation_table, metadata)

    # Add 1 new column to the metadata_file table
    MetadataFile_table = Table("metadata_file", metadata, autoload=True)
    col = Column('lda_id', Integer, index=True, nullable=True)
    add_column(col, MetadataFile_table, metadata, index_name='ix_metadata_file_lda_id')

    # Add 1 new column to the stored_workflow table - changeset 2328
    # use_alter defers FK creation since 'workflow' may not exist yet.
    StoredWorkflow_table = Table(
        "stored_workflow", metadata,
        Column("latest_workflow_id", Integer,
               ForeignKey("workflow.id", use_alter=True, name='stored_workflow_latest_workflow_id_fk'),
               index=True),
        autoload=True, extend_existing=True)
    col = Column('importable', Boolean, default=False)
    add_column(col, StoredWorkflow_table, metadata)

    # Create an index on the Job.state column - changeset 2192
    add_index('ix_job_state', 'job', 'state', metadata)

    # Add all of the new tables above
    metadata.create_all()

    # Add 1 foreign key constraint to the history_dataset_association table
    LibraryDatasetDatasetAssociation_table = Table(
        "library_dataset_dataset_association", metadata, autoload=True)
    try:
        cons = ForeignKeyConstraint(
            [HistoryDatasetAssociation_table.c.copied_from_library_dataset_dataset_association_id],
            [LibraryDatasetDatasetAssociation_table.c.id],
            name='history_dataset_association_copied_from_library_dataset_da_fkey')
        # Create the constraint
        cons.create()
    except Exception:
        # Best-effort: log and continue so the rest of the migration runs.
        log.exception(
            "Adding foreign key constraint 'history_dataset_association_copied_from_library_dataset_da_fkey' to table 'history_dataset_association' failed."
        )

    # Add 1 foreign key constraint to the metadata_file table
    LibraryDatasetDatasetAssociation_table = Table(
        "library_dataset_dataset_association", metadata, autoload=True)
    if migrate_engine.name != 'sqlite':
        # Sqlite can't alter table add foreign key.
        try:
            cons = ForeignKeyConstraint(
                [MetadataFile_table.c.lda_id],
                [LibraryDatasetDatasetAssociation_table.c.id],
                name='metadata_file_lda_id_fkey')
            # Create the constraint
            cons.create()
        except Exception:
            log.exception(
                "Adding foreign key constraint 'metadata_file_lda_id_fkey' to table 'metadata_file' failed."
            )

    # Make sure we have at least 1 user
    cmd = "SELECT * FROM galaxy_user;"
    users = migrate_engine.execute(cmd).fetchall()
    if users:
        cmd = "SELECT * FROM role;"
        roles = migrate_engine.execute(cmd).fetchall()
        if not roles:
            # Create private roles for each user - pass 1
            cmd = \
                "INSERT INTO role " + \
                "SELECT %s AS id," + \
                "%s AS create_time," + \
                "%s AS update_time," + \
                "email AS name," + \
                "email AS description," + \
                "'private' As type," + \
                "%s AS deleted " + \
                "FROM galaxy_user " + \
                "ORDER BY id;"
            cmd = cmd % (nextval(migrate_engine, 'role'),
                         localtimestamp(migrate_engine),
                         localtimestamp(migrate_engine),
                         engine_false(migrate_engine))
            migrate_engine.execute(cmd)

            # Create private roles for each user - pass 2
            # Dialect-specific string concatenation for the description.
            # NOTE(review): if the engine is none of postgres/mysql/sqlite,
            # the pass-1 INSERT would be re-executed here — presumably only
            # these three backends are supported; confirm.
            if migrate_engine.name in ['postgres', 'postgresql', 'sqlite']:
                cmd = "UPDATE role SET description = 'Private role for ' || description;"
            elif migrate_engine.name == 'mysql':
                cmd = "UPDATE role SET description = CONCAT( 'Private role for ', description );"
            migrate_engine.execute(cmd)

            # Create private roles for each user - pass 3
            # Associate each user with their private role (matched by email).
            cmd = \
                "INSERT INTO user_role_association " + \
                "SELECT %s AS id," + \
                "galaxy_user.id AS user_id," + \
                "role.id AS role_id," + \
                "%s AS create_time," + \
                "%s AS update_time " + \
                "FROM galaxy_user, role " + \
                "WHERE galaxy_user.email = role.name " + \
                "ORDER BY galaxy_user.id;"
            cmd = cmd % (nextval(migrate_engine, 'user_role_association'),
                         localtimestamp(migrate_engine),
                         localtimestamp(migrate_engine))
            migrate_engine.execute(cmd)

            # Create default permissions for each user
            cmd = \
                "INSERT INTO default_user_permissions " + \
                "SELECT %s AS id," + \
                "galaxy_user.id AS user_id," + \
                "'manage permissions' AS action," + \
                "user_role_association.role_id AS role_id " + \
                "FROM galaxy_user " + \
                "JOIN user_role_association ON user_role_association.user_id = galaxy_user.id " + \
                "ORDER BY galaxy_user.id;"
            cmd = cmd % nextval(migrate_engine, 'default_user_permissions')
            migrate_engine.execute(cmd)

            # Create default history permissions for each active history associated with a user
            cmd = \
                "INSERT INTO default_history_permissions " + \
                "SELECT %s AS id," + \
                "history.id AS history_id," + \
                "'manage permissions' AS action," + \
                "user_role_association.role_id AS role_id " + \
                "FROM history " + \
                "JOIN user_role_association ON user_role_association.user_id = history.user_id " + \
                "WHERE history.purged = %s AND history.user_id IS NOT NULL;"
            cmd = cmd % (nextval(migrate_engine, 'default_history_permissions'),
                         engine_false(migrate_engine))
            migrate_engine.execute(cmd)

            # Create "manage permissions" dataset_permissions for all activate-able datasets
            cmd = \
                "INSERT INTO dataset_permissions " + \
                "SELECT %s AS id," + \
                "%s AS create_time," + \
                "%s AS update_time," + \
                "'manage permissions' AS action," + \
                "history_dataset_association.dataset_id AS dataset_id," + \
                "user_role_association.role_id AS role_id " + \
                "FROM history " + \
                "JOIN history_dataset_association ON history_dataset_association.history_id = history.id " + \
                "JOIN dataset ON history_dataset_association.dataset_id = dataset.id " + \
                "JOIN user_role_association ON user_role_association.user_id = history.user_id " + \
                "WHERE dataset.purged = %s AND history.user_id IS NOT NULL;"
            cmd = cmd % (nextval(migrate_engine, 'dataset_permissions'),
                         localtimestamp(migrate_engine),
                         localtimestamp(migrate_engine),
                         engine_false(migrate_engine))
            migrate_engine.execute(cmd)
def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine agent_builds = Table('agent_builds', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('hypervisor', String(length=255)), Column('os', String(length=255)), Column('architecture', String(length=255)), Column('version', String(length=255)), Column('url', String(length=255)), Column('md5hash', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) aggregate_hosts = Table('aggregate_hosts', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('host', String(length=255)), Column('aggregate_id', Integer, ForeignKey('aggregates.id'), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8' ) aggregate_metadata = Table('aggregate_metadata', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('aggregate_id', Integer, ForeignKey('aggregates.id'), nullable=False), Column('key', String(length=255), nullable=False), Column('value', String(length=255), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8' ) aggregates = Table('aggregates', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('name', String(length=255)), Column('availability_zone', String(length=255), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8' ) block_device_mapping = Table('block_device_mapping', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', 
Integer, primary_key=True, nullable=False), Column('device_name', String(length=255), nullable=False), Column('delete_on_termination', Boolean), Column('virtual_name', String(length=255)), Column('snapshot_id', String(length=36), nullable=True), Column('volume_id', String(length=36), nullable=True), Column('volume_size', Integer), Column('no_device', Boolean), Column('connection_info', MediumText()), Column('instance_uuid', String(length=36)), mysql_engine='InnoDB', mysql_charset='utf8' ) bw_usage_cache = Table('bw_usage_cache', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('start_period', DateTime, nullable=False), Column('last_refreshed', DateTime), Column('bw_in', BigInteger), Column('bw_out', BigInteger), Column('mac', String(length=255)), Column('uuid', String(length=36)), mysql_engine='InnoDB', mysql_charset='utf8' ) cells = Table('cells', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('api_url', String(length=255)), Column('username', String(length=255)), Column('password', String(length=255)), Column('weight_offset', Float), Column('weight_scale', Float), Column('name', String(length=255)), Column('is_parent', Boolean), Column('rpc_host', String(length=255)), Column('rpc_port', Integer), Column('rpc_virtual_host', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) certificates = Table('certificates', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('file_name', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) 
compute_node_stats = Table('compute_node_stats', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('compute_node_id', Integer, nullable=False), Column('key', String(length=255), nullable=False), Column('value', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) compute_nodes = Table('compute_nodes', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('service_id', Integer, nullable=False), Column('vcpus', Integer, nullable=False), Column('memory_mb', Integer, nullable=False), Column('local_gb', Integer, nullable=False), Column('vcpus_used', Integer, nullable=False), Column('memory_mb_used', Integer, nullable=False), Column('local_gb_used', Integer, nullable=False), Column('hypervisor_type', MediumText(), nullable=False), Column('hypervisor_version', Integer, nullable=False), Column('cpu_info', MediumText(), nullable=False), Column('disk_available_least', Integer), Column('free_ram_mb', Integer), Column('free_disk_gb', Integer), Column('current_workload', Integer), Column('running_vms', Integer), Column('hypervisor_hostname', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) console_pools = Table('console_pools', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('address', String(length=255)), Column('username', String(length=255)), Column('password', String(length=255)), Column('console_type', String(length=255)), Column('public_hostname', String(length=255)), Column('host', String(length=255)), Column('compute_host', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) consoles = 
Table('consoles', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('instance_name', String(length=255)), Column('password', String(length=255)), Column('port', Integer), Column('pool_id', Integer, ForeignKey('console_pools.id')), Column('instance_uuid', String(length=36)), mysql_engine='InnoDB', mysql_charset='utf8' ) dns_domains = Table('dns_domains', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('domain', String(length=255), primary_key=True, nullable=False), Column('scope', String(length=255)), Column('availability_zone', String(length=255)), Column('project_id', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) fixed_ips = Table('fixed_ips', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('address', String(length=255)), Column('network_id', Integer), Column('allocated', Boolean), Column('leased', Boolean), Column('reserved', Boolean), Column('virtual_interface_id', Integer), Column('host', String(length=255)), Column('instance_uuid', String(length=36)), mysql_engine='InnoDB', mysql_charset='utf8' ) floating_ips = Table('floating_ips', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('address', String(length=255)), Column('fixed_ip_id', Integer), Column('project_id', String(length=255)), Column('host', String(length=255)), Column('auto_assigned', Boolean), Column('pool', String(length=255)), Column('interface', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) instance_faults = Table('instance_faults', meta, 
Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('instance_uuid', String(length=36)), Column('code', Integer, nullable=False), Column('message', String(length=255)), Column('details', MediumText()), mysql_engine='InnoDB', mysql_charset='utf8' ) instance_id_mappings = Table('instance_id_mappings', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('uuid', String(36), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8' ) instance_info_caches = Table('instance_info_caches', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('network_info', MediumText()), Column('instance_uuid', String(length=36), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8' ) instance_metadata = Table('instance_metadata', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('key', String(length=255)), Column('value', String(length=255)), Column('instance_uuid', String(length=36), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8' ) instance_system_metadata = Table('instance_system_metadata', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('instance_uuid', String(length=36), nullable=False), Column('key', String(length=255), nullable=False), Column('value', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) instance_type_extra_specs = 
Table('instance_type_extra_specs', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('instance_type_id', Integer, ForeignKey('instance_types.id'), nullable=False), Column('key', String(length=255)), Column('value', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) instance_type_projects = Table('instance_type_projects', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('instance_type_id', Integer, nullable=False), Column('project_id', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) instance_types = Table('instance_types', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('name', String(length=255)), Column('id', Integer, primary_key=True, nullable=False), Column('memory_mb', Integer, nullable=False), Column('vcpus', Integer, nullable=False), Column('swap', Integer, nullable=False), Column('vcpu_weight', Integer), Column('flavorid', String(length=255)), Column('rxtx_factor', Float), Column('root_gb', Integer), Column('ephemeral_gb', Integer), Column('disabled', Boolean), Column('is_public', Boolean), mysql_engine='InnoDB', mysql_charset='utf8' ) instances = Table('instances', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('internal_id', Integer), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('image_ref', String(length=255)), Column('kernel_id', String(length=255)), Column('ramdisk_id', String(length=255)), Column('server_name', String(length=255)), Column('launch_index', 
Integer), Column('key_name', String(length=255)), Column('key_data', MediumText()), Column('power_state', Integer), Column('vm_state', String(length=255)), Column('memory_mb', Integer), Column('vcpus', Integer), Column('hostname', String(length=255)), Column('host', String(length=255)), Column('user_data', MediumText()), Column('reservation_id', String(length=255)), Column('scheduled_at', DateTime), Column('launched_at', DateTime), Column('terminated_at', DateTime), Column('display_name', String(length=255)), Column('display_description', String(length=255)), Column('availability_zone', String(length=255)), Column('locked', Boolean), Column('os_type', String(length=255)), Column('launched_on', MediumText()), Column('instance_type_id', Integer), Column('vm_mode', String(length=255)), Column('uuid', String(length=36)), Column('architecture', String(length=255)), Column('root_device_name', String(length=255)), Column('access_ip_v4', String(length=255)), Column('access_ip_v6', String(length=255)), Column('config_drive', String(length=255)), Column('task_state', String(length=255)), Column('default_ephemeral_device', String(length=255)), Column('default_swap_device', String(length=255)), Column('progress', Integer), Column('auto_disk_config', Boolean), Column('shutdown_terminate', Boolean), Column('disable_terminate', Boolean), Column('root_gb', Integer), Column('ephemeral_gb', Integer), Column('cell_name', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) iscsi_targets = Table('iscsi_targets', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('target_num', Integer), Column('host', String(length=255)), Column('volume_id', String(length=36), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8' ) key_pairs = Table('key_pairs', meta, Column('created_at', DateTime), Column('updated_at', DateTime), 
Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('name', String(length=255)), Column('user_id', String(length=255)), Column('fingerprint', String(length=255)), Column('public_key', MediumText()), mysql_engine='InnoDB', mysql_charset='utf8' ) migrations = Table('migrations', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('source_compute', String(length=255)), Column('dest_compute', String(length=255)), Column('dest_host', String(length=255)), Column('status', String(length=255)), Column('instance_uuid', String(length=255)), Column('old_instance_type_id', Integer), Column('new_instance_type_id', Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) networks = Table('networks', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('injected', Boolean), Column('cidr', String(length=255)), Column('netmask', String(length=255)), Column('bridge', String(length=255)), Column('gateway', String(length=255)), Column('broadcast', String(length=255)), Column('dns1', String(length=255)), Column('vlan', Integer), Column('vpn_public_address', String(length=255)), Column('vpn_public_port', Integer), Column('vpn_private_address', String(length=255)), Column('dhcp_start', String(length=255)), Column('project_id', String(length=255)), Column('host', String(length=255)), Column('cidr_v6', String(length=255)), Column('gateway_v6', String(length=255)), Column('label', String(length=255)), Column('netmask_v6', String(length=255)), Column('bridge_interface', String(length=255)), Column('multi_host', Boolean), Column('dns2', String(length=255)), Column('uuid', String(length=36)), Column('priority', Integer), Column('rxtx_base', 
Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) provider_fw_rules = Table('provider_fw_rules', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('protocol', String(length=5)), Column('from_port', Integer), Column('to_port', Integer), Column('cidr', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) quota_classes = Table('quota_classes', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('class_name', String(length=255)), Column('resource', String(length=255)), Column('hard_limit', Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) quota_usages = Table('quota_usages', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('project_id', String(length=255)), Column('resource', String(length=255)), Column('in_use', Integer, nullable=False), Column('reserved', Integer, nullable=False), Column('until_refresh', Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) quotas = Table('quotas', meta, Column('id', Integer, primary_key=True, nullable=False), Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('project_id', String(length=255)), Column('resource', String(length=255), nullable=False), Column('hard_limit', Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) reservations = Table('reservations', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('uuid', String(length=36), nullable=False), 
Column('usage_id', Integer, nullable=False), Column('project_id', String(length=255)), Column('resource', String(length=255)), Column('delta', Integer, nullable=False), Column('expire', DateTime), mysql_engine='InnoDB', mysql_charset='utf8' ) s3_images = Table('s3_images', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('uuid', String(length=36), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8' ) security_group_instance_association = \ Table('security_group_instance_association', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('security_group_id', Integer, ForeignKey('security_groups.id')), Column('instance_uuid', String(length=36)), mysql_engine='InnoDB', mysql_charset='utf8' ) security_group_rules = Table('security_group_rules', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('parent_group_id', Integer, ForeignKey('security_groups.id')), Column('protocol', String(length=255)), Column('from_port', Integer), Column('to_port', Integer), Column('cidr', String(length=255)), Column('group_id', Integer, ForeignKey('security_groups.id')), mysql_engine='InnoDB', mysql_charset='utf8' ) security_groups = Table('security_groups', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('name', String(length=255)), Column('description', String(length=255)), Column('user_id', String(length=255)), Column('project_id', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) services = 
Table('services', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('host', String(length=255)), Column('binary', String(length=255)), Column('topic', String(length=255)), Column('report_count', Integer, nullable=False), Column('disabled', Boolean), Column('availability_zone', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) sm_backend_config = Table('sm_backend_config', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('flavor_id', Integer, ForeignKey('sm_flavors.id'), nullable=False), Column('sr_uuid', String(length=255)), Column('sr_type', String(length=255)), Column('config_params', String(length=2047)), mysql_engine='InnoDB', mysql_charset='utf8' ) sm_flavors = Table('sm_flavors', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('label', String(length=255)), Column('description', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) sm_volume = Table('sm_volume', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', String(length=36), primary_key=True, nullable=False, autoincrement=False), Column('backend_id', Integer, nullable=False), Column('vdi_uuid', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) snapshot_id_mappings = Table('snapshot_id_mappings', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('uuid', String(length=36), nullable=False), 
mysql_engine='InnoDB', mysql_charset='utf8' ) snapshots = Table('snapshots', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', String(length=36), primary_key=True, nullable=False), Column('volume_id', String(length=36), nullable=False), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('status', String(length=255)), Column('progress', String(length=255)), Column('volume_size', Integer), Column('scheduled_at', DateTime), Column('display_name', String(length=255)), Column('display_description', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) task_log = Table('task_log', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('task_name', String(length=255), nullable=False), Column('state', String(length=255), nullable=False), Column('host', String(length=255), nullable=False), Column('period_beginning', String(length=255), nullable=False), Column('period_ending', String(length=255), nullable=False), Column('message', String(length=255), nullable=False), Column('task_items', Integer), Column('errors', Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) virtual_interfaces = Table('virtual_interfaces', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('address', String(length=255), unique=True), Column('network_id', Integer), Column('uuid', String(length=36)), Column('instance_uuid', String(length=36), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8' ) virtual_storage_arrays = Table('virtual_storage_arrays', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', 
Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('display_name', String(length=255)), Column('display_description', String(length=255)), Column('project_id', String(length=255)), Column('availability_zone', String(length=255)), Column('instance_type_id', Integer, nullable=False), Column('image_ref', String(length=255)), Column('vc_count', Integer, nullable=False), Column('vol_count', Integer, nullable=False), Column('status', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) volume_id_mappings = Table('volume_id_mappings', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('uuid', String(length=36), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8' ) volume_metadata = Table('volume_metadata', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('volume_id', String(length=36), nullable=False), Column('key', String(length=255)), Column('value', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) volume_type_extra_specs = Table('volume_type_extra_specs', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('volume_type_id', Integer, ForeignKey('volume_types.id'), nullable=False), Column('key', String(length=255)), Column('value', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) volume_types = Table('volume_types', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('name', String(length=255)), mysql_engine='InnoDB', 
mysql_charset='utf8' ) volumes = Table('volumes', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', String(length=36), primary_key=True, nullable=False), Column('ec2_id', String(length=255)), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('host', String(length=255)), Column('size', Integer), Column('availability_zone', String(length=255)), Column('mountpoint', String(length=255)), Column('status', String(length=255)), Column('attach_status', String(length=255)), Column('scheduled_at', DateTime), Column('launched_at', DateTime), Column('terminated_at', DateTime), Column('display_name', String(length=255)), Column('display_description', String(length=255)), Column('provider_location', String(length=256)), Column('provider_auth', String(length=256)), Column('snapshot_id', String(length=36)), Column('volume_type_id', Integer), Column('instance_uuid', String(length=36)), Column('attach_time', DateTime), mysql_engine='InnoDB', mysql_charset='utf8' ) instances.create() Index('uuid', instances.c.uuid, unique=True).create(migrate_engine) Index('project_id', instances.c.project_id).create(migrate_engine) # create all tables tables = [aggregates, console_pools, instance_types, security_groups, sm_flavors, sm_backend_config, snapshots, volume_types, volumes, # those that are children and others later agent_builds, aggregate_hosts, aggregate_metadata, block_device_mapping, bw_usage_cache, cells, certificates, compute_node_stats, compute_nodes, consoles, dns_domains, fixed_ips, floating_ips, instance_faults, instance_id_mappings, instance_info_caches, instance_metadata, instance_system_metadata, instance_type_extra_specs, instance_type_projects, iscsi_targets, key_pairs, migrations, networks, provider_fw_rules, quota_classes, quota_usages, quotas, reservations, s3_images, security_group_instance_association, security_group_rules, services, 
sm_volume, snapshot_id_mappings, task_log, virtual_interfaces, virtual_storage_arrays, volume_id_mappings, volume_metadata, volume_type_extra_specs] for table in tables: try: table.create() except Exception: LOG.info(repr(table)) LOG.exception(_('Exception while creating table.')) raise indexes = [ # agent_builds Index('agent_builds_hypervisor_os_arch_idx', agent_builds.c.hypervisor, agent_builds.c.os, agent_builds.c.architecture), # aggregate_metadata Index('aggregate_metadata_key_idx', aggregate_metadata.c.key), # block_device_mapping Index('block_device_mapping_instance_uuid_idx', block_device_mapping.c.instance_uuid), Index('block_device_mapping_instance_uuid_device_name_idx', block_device_mapping.c.instance_uuid, block_device_mapping.c.device_name), Index( 'block_device_mapping_instance_uuid_virtual_name_device_name_idx', block_device_mapping.c.instance_uuid, block_device_mapping.c.virtual_name, block_device_mapping.c.device_name), Index('block_device_mapping_instance_uuid_volume_id_idx', block_device_mapping.c.instance_uuid, block_device_mapping.c.volume_id), # bw_usage_cache Index('bw_usage_cache_uuid_start_period_idx', bw_usage_cache.c.uuid, bw_usage_cache.c.start_period), # certificates Index('certificates_project_id_deleted_idx', certificates.c.project_id, certificates.c.deleted), Index('certificates_user_id_deleted_idx', certificates.c.user_id, certificates.c.deleted), # compute_node_stats Index('ix_compute_node_stats_compute_node_id', compute_node_stats.c.compute_node_id), # consoles Index('consoles_instance_uuid_idx', consoles.c.instance_uuid), # dns_domains Index('dns_domains_domain_deleted_idx', dns_domains.c.domain, dns_domains.c.deleted), # fixed_ips Index('fixed_ips_host_idx', fixed_ips.c.host), Index('fixed_ips_network_id_host_deleted_idx', fixed_ips.c.network_id, fixed_ips.c.host, fixed_ips.c.deleted), Index('fixed_ips_address_reserved_network_id_deleted_idx', fixed_ips.c.address, fixed_ips.c.reserved, fixed_ips.c.network_id, 
fixed_ips.c.deleted), # floating_ips Index('floating_ips_host_idx', floating_ips.c.host), Index('floating_ips_project_id_idx', floating_ips.c.project_id), Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx', floating_ips.c.pool, floating_ips.c.deleted, floating_ips.c.fixed_ip_id, floating_ips.c.project_id), # instance_faults Index('instance_faults_instance_uuid_deleted_created_at_idx', instance_faults.c.instance_uuid, instance_faults.c.deleted, instance_faults.c.created_at), # instance_type_extra_specs Index('instance_type_extra_specs_instance_type_id_key_idx', instance_type_extra_specs.c.instance_type_id, instance_type_extra_specs.c.key), # instance_id_mappings Index('ix_instance_id_mappings_uuid', instance_id_mappings.c.uuid), # instance_metadata Index('instance_metadata_instance_uuid_idx', instance_metadata.c.instance_uuid), # instances Index('instances_host_deleted_idx', instances.c.host, instances.c.deleted), Index('instances_reservation_id_idx', instances.c.reservation_id), Index('instances_terminated_at_launched_at_idx', instances.c.terminated_at, instances.c.launched_at), Index('instances_uuid_deleted_idx', instances.c.uuid, instances.c.deleted), Index('instances_task_state_updated_at_idx', instances.c.task_state, instances.c.updated_at), # iscsi_targets Index('iscsi_targets_host_idx', iscsi_targets.c.host), Index('iscsi_targets_host_volume_id_deleted_idx', iscsi_targets.c.host, iscsi_targets.c.volume_id, iscsi_targets.c.deleted), # key_pairs Index('key_pair_user_id_name_idx', key_pairs.c.user_id, key_pairs.c.name), # networks Index('networks_bridge_deleted_idx', networks.c.bridge, networks.c.deleted), Index('networks_host_idx', networks.c.host), Index('networks_project_id_deleted_idx', networks.c.project_id, networks.c.deleted), Index('networks_uuid_project_id_deleted_idx', networks.c.uuid, networks.c.project_id, networks.c.deleted), Index('networks_vlan_deleted_idx', networks.c.vlan, networks.c.deleted), Index('networks_cidr_v6_idx', 
networks.c.cidr_v6), # reservations Index('ix_reservations_project_id', reservations.c.project_id), # security_group_instance_association Index('security_group_instance_association_instance_uuid_idx', security_group_instance_association.c.instance_uuid), # quota_classes Index('ix_quota_classes_class_name', quota_classes.c.class_name), # quota_usages Index('ix_quota_usages_project_id', quota_usages.c.project_id), # volumes Index('volumes_instance_uuid_idx', volumes.c.instance_uuid), # task_log Index('ix_task_log_period_beginning', task_log.c.period_beginning), Index('ix_task_log_host', task_log.c.host), Index('ix_task_log_period_ending', task_log.c.period_ending), ] mysql_indexes = [ # TODO(dprince): review these for removal. Some of these indexes # were automatically created by SQLAlchemy migrate and *may* no longer # be in use Index('instance_type_id', instance_type_projects.c.instance_type_id), Index('project_id', dns_domains.c.project_id), Index('fixed_ip_id', floating_ips.c.fixed_ip_id), Index('backend_id', sm_volume.c.backend_id), Index('network_id', virtual_interfaces.c.network_id), Index('network_id', fixed_ips.c.network_id), Index('fixed_ips_virtual_interface_id_fkey', fixed_ips.c.virtual_interface_id), Index('address', fixed_ips.c.address), Index('fixed_ips_instance_uuid_fkey', fixed_ips.c.instance_uuid), Index('instance_uuid', instance_system_metadata.c.instance_uuid), Index('iscsi_targets_volume_id_fkey', iscsi_targets.c.volume_id), Index('snapshot_id', block_device_mapping.c.snapshot_id), Index('usage_id', reservations.c.usage_id), Index('virtual_interfaces_instance_uuid_fkey', virtual_interfaces.c.instance_uuid), Index('volume_id', block_device_mapping.c.volume_id), Index('volume_metadata_volume_id_fkey', volume_metadata.c.volume_id), ] # MySQL specific indexes if migrate_engine.name == 'mysql': for index in mysql_indexes: index.create(migrate_engine) # PostgreSQL specific indexes if migrate_engine.name == 'postgresql': Index('address', 
fixed_ips.c.address).create() # Common indexes if migrate_engine.name == 'mysql' or migrate_engine.name == 'postgresql': for index in indexes: index.create(migrate_engine) fkeys = [ [[fixed_ips.c.instance_uuid], [instances.c.uuid], 'fixed_ips_instance_uuid_fkey'], [[block_device_mapping.c.instance_uuid], [instances.c.uuid], 'block_device_mapping_instance_uuid_fkey'], [[consoles.c.instance_uuid], [instances.c.uuid], 'consoles_instance_uuid_fkey'], [[instance_info_caches.c.instance_uuid], [instances.c.uuid], 'instance_info_caches_instance_uuid_fkey'], [[instance_metadata.c.instance_uuid], [instances.c.uuid], 'instance_metadata_instance_uuid_fkey'], [[instance_system_metadata.c.instance_uuid], [instances.c.uuid], 'instance_system_metadata_ibfk_1'], [[instance_type_projects.c.instance_type_id], [instance_types.c.id], 'instance_type_projects_ibfk_1'], [[iscsi_targets.c.volume_id], [volumes.c.id], 'iscsi_targets_volume_id_fkey'], [[reservations.c.usage_id], [quota_usages.c.id], 'reservations_ibfk_1'], [[security_group_instance_association.c.instance_uuid], [instances.c.uuid], 'security_group_instance_association_instance_uuid_fkey'], [[sm_volume.c.backend_id], [sm_backend_config.c.id], 'sm_volume_ibfk_2'], [[sm_volume.c.id], [volumes.c.id], 'sm_volume_id_fkey'], [[virtual_interfaces.c.instance_uuid], [instances.c.uuid], 'virtual_interfaces_instance_uuid_fkey'], [[volume_metadata.c.volume_id], [volumes.c.id], 'volume_metadata_volume_id_fkey'], ] for fkey_pair in fkeys: if migrate_engine.name == 'mysql': # For MySQL we name our fkeys explicitly so they match Folsom fkey = ForeignKeyConstraint(columns=fkey_pair[0], refcolumns=fkey_pair[1], name=fkey_pair[2]) fkey.create() elif migrate_engine.name == 'postgresql': # PostgreSQL names things like it wants (correct and compatible!) fkey = ForeignKeyConstraint(columns=fkey_pair[0], refcolumns=fkey_pair[1]) fkey.create() if migrate_engine.name == "mysql": # In Folsom we explicitly converted migrate_version to UTF8. 
sql = "ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8;" # Set default DB charset to UTF8. sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" % \ migrate_engine.url.database migrate_engine.execute(sql) # TODO(dprince): due to the upgrade scripts in Folsom the unique key # on instance_uuid is named 'instance_id'. Rename it in Grizzly? UniqueConstraint('instance_uuid', table=instance_info_caches, name='instance_id').create() if migrate_engine.name == "postgresql": # TODO(dprince): Drop this in Grizzly. Snapshots were converted # to UUIDs in Folsom so we no longer require this autocreated # sequence. sql = """CREATE SEQUENCE snapshots_id_seq START WITH 1 INCREMENT BY 1 NO MINVALUE NO MAXVALUE CACHE 1; ALTER SEQUENCE snapshots_id_seq OWNED BY snapshots.id; SELECT pg_catalog.setval('snapshots_id_seq', 1, false); ALTER TABLE ONLY snapshots ALTER COLUMN id SET DEFAULT nextval('snapshots_id_seq'::regclass);""" # TODO(dprince): Drop this in Grizzly. Volumes were converted # to UUIDs in Folsom so we no longer require this autocreated # sequence. sql += """CREATE SEQUENCE volumes_id_seq START WITH 1 INCREMENT BY 1 NO MINVALUE NO MAXVALUE CACHE 1; ALTER SEQUENCE volumes_id_seq OWNED BY volumes.id; SELECT pg_catalog.setval('volumes_id_seq', 1, false); ALTER TABLE ONLY volumes ALTER COLUMN id SET DEFAULT nextval('volumes_id_seq'::regclass);""" migrate_engine.execute(sql) # TODO(dprince): due to the upgrade scripts in Folsom the unique key # on instance_uuid is named '.._instance_id_..'. Rename it in Grizzly? UniqueConstraint('instance_uuid', table=instance_info_caches, name='instance_info_caches_instance_id_key').create() # populate initial instance types _populate_instance_types(instance_types)
def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine asset = Table( 'asset', meta, Column('created_at', DATETIME), Column('updated_at', DATETIME), Column('deleted_at', DATETIME), Column('deleted', INTEGER(display_width=11)), Column('key', VARCHAR(length=128)), Column('id', INTEGER(display_width=11), primary_key=True, nullable=False), Column('name', VARCHAR(length=63), nullable=False), Column('brand', VARCHAR(length=63)), Column('model', VARCHAR(length=63)), Column('serial', VARCHAR(length=63)), Column('mac', VARCHAR(length=31)), Column('ip', VARCHAR(length=127)), Column('type', VARCHAR(length=31), nullable=False), Column('location', VARCHAR(length=31), nullable=False), Column('asset_tag', VARCHAR(length=31)), Column('status', ENUM(u'New', u'Discovered', u'DiscoveryMismatch', u'Decommissioned'), nullable=False), Column('protected', TINYINT(display_width=1), nullable=False), Column('rack_id', INTEGER(display_width=11)), ) cluster = Table( 'cluster', meta, Column('created_at', DATETIME), Column('updated_at', DATETIME), Column('deleted_at', DATETIME), Column('deleted', INTEGER(display_width=11)), Column('key', VARCHAR(length=128)), Column('id', INTEGER(display_width=11), primary_key=True, nullable=False), Column('name', VARCHAR(length=255), nullable=False), Column('location', VARCHAR(length=31), nullable=False), Column('type', VARCHAR(length=255), nullable=False), ) network_map = Table( 'network_map', meta, Column('created_at', DATETIME), Column('updated_at', DATETIME), Column('deleted_at', DATETIME), Column('deleted', INTEGER(display_width=11)), Column('key', VARCHAR(length=128)), Column('id', INTEGER(display_width=11), primary_key=True, nullable=False), Column('name', VARCHAR(length=255), nullable=False), Column('mgmt_port_map', TEXT), Column('number2unit', TEXT), Column('pxe_nic', VARCHAR(length=63)), Column('network', TEXT), ) rack = Table( 'rack', meta, Column('created_at', DATETIME), Column('updated_at', DATETIME), Column('deleted_at', 
DATETIME), Column('deleted', INTEGER(display_width=11)), Column('key', VARCHAR(length=128)), Column('id', INTEGER(display_width=11), primary_key=True, nullable=False), Column('name', VARCHAR(length=255), nullable=False), Column('location', VARCHAR(length=31), nullable=False), Column('status', VARCHAR(length=31)), Column('gw_ip', VARCHAR(length=31)), Column('environment', VARCHAR(length=31)), Column('sku_quota', TEXT), Column('network_map_id', INTEGER(display_width=11)), Column('worker_id', INTEGER(display_width=11)), Column('meta', TEXT), ) server = Table( 'server', meta, Column('created_at', DATETIME), Column('updated_at', DATETIME), Column('deleted_at', DATETIME), Column('deleted', INTEGER(display_width=11)), Column('key', VARCHAR(length=128)), Column('id', INTEGER(display_width=11), primary_key=True, nullable=False), Column('name', VARCHAR(length=255), nullable=False), Column('status', ENUM(u'Unmanaged', u'Unknown', u'Validating', u'ValidatedWithErrors', u'Validated', u'Provisioning', u'ProvisionedWithErrors', u'Provisioned', u'Deploying', u'DeployedWithErrors', u'Deployed'), nullable=False), Column('pxe_ip', VARCHAR(length=15)), Column('pxe_mac', VARCHAR(length=31)), Column('role', VARCHAR(length=64)), Column('fqdn', VARCHAR(length=255)), Column('server_number', VARCHAR(length=15)), Column('rack_unit', INTEGER(display_width=11)), Column('description', TEXT), Column('lock_id', VARCHAR(length=36), nullable=False), Column('hdd_type', VARCHAR(length=127)), Column('os_args', TEXT), Column('role_alias', VARCHAR(length=64)), Column('gw_ip', VARCHAR(length=15)), Column('target_status', ENUM(u'Unmanaged', u'Validated', u'Provisioned', u'Deployed'), nullable=False), Column('message', VARCHAR(length=255)), Column('meta', TEXT), Column('asset_id', INTEGER(display_width=11)), Column('cluster_id', INTEGER(display_width=11)), Column('sku_id', INTEGER(display_width=11)), Column('version', INTEGER(display_width=11), nullable=False), ) server_interface = Table( 
'server_interface', meta, Column('created_at', DATETIME), Column('updated_at', DATETIME), Column('deleted_at', DATETIME), Column('deleted', INTEGER(display_width=11)), Column('key', VARCHAR(length=128)), Column('name', VARCHAR(length=63), nullable=False), Column('mac', VARCHAR(length=31)), Column('id', INTEGER(display_width=11), primary_key=True, nullable=False), Column('server_id', INTEGER(display_width=11)), Column('state', VARCHAR(length=16)), ) sku = Table( 'sku', meta, Column('created_at', DATETIME), Column('updated_at', DATETIME), Column('deleted_at', DATETIME), Column('deleted', INTEGER(display_width=11)), Column('key', VARCHAR(length=128)), Column('id', INTEGER(display_width=11), primary_key=True, nullable=False), Column('name', VARCHAR(length=255), nullable=False), Column('location', VARCHAR(length=31), nullable=False), Column('description', VARCHAR(length=255)), Column('cpu', VARCHAR(length=255), nullable=False), Column('ram', VARCHAR(length=255), nullable=False), Column('storage', VARCHAR(length=255), nullable=False), ) subnet = Table( 'subnet', meta, Column('created_at', DATETIME), Column('updated_at', DATETIME), Column('deleted_at', DATETIME), Column('deleted', INTEGER(display_width=11)), Column('key', VARCHAR(length=128)), Column('id', INTEGER(display_width=11), primary_key=True, nullable=False), Column('name', VARCHAR(length=255), nullable=False), Column('location', VARCHAR(length=31), nullable=False), Column('ip', VARCHAR(length=31), nullable=False), Column('mask', VARCHAR(length=31), nullable=False), Column('vlan_tag', INTEGER(display_width=11), nullable=False), Column('gateway', VARCHAR(length=31), nullable=False), Column('tagged', TINYINT(display_width=1)), ) switch = Table( 'switch', meta, Column('created_at', DATETIME), Column('updated_at', DATETIME), Column('deleted_at', DATETIME), Column('deleted', INTEGER(display_width=11)), Column('key', VARCHAR(length=128)), Column('id', INTEGER(display_width=11), primary_key=True, nullable=False), 
Column('name', VARCHAR(length=255), nullable=False), Column('asset_id', INTEGER(display_width=11)), ) switch_interface = Table( 'switch_interface', meta, Column('created_at', DATETIME), Column('updated_at', DATETIME), Column('deleted_at', DATETIME), Column('deleted', INTEGER(display_width=11)), Column('key', VARCHAR(length=128)), Column('name', VARCHAR(length=63), nullable=False), Column('mac', VARCHAR(length=31)), Column('id', INTEGER(display_width=11), primary_key=True, nullable=False), Column('ip', VARCHAR(length=15)), Column('mask', VARCHAR(length=15)), Column('gw', VARCHAR(length=31)), Column('net_ip', VARCHAR(length=31)), Column('switch_id', INTEGER(display_width=11)), ) worker = Table( 'worker', meta, Column('created_at', DATETIME), Column('updated_at', DATETIME), Column('deleted_at', DATETIME), Column('deleted', INTEGER(display_width=11)), Column('key', VARCHAR(length=128)), Column('id', INTEGER(display_width=11), primary_key=True, nullable=False), Column('name', VARCHAR(length=63), nullable=False), Column('worker_url', VARCHAR(length=255), nullable=False), Column('location', VARCHAR(length=31), nullable=False)) # create all tables tables = [ cluster, sku, network_map, subnet, worker, rack, asset, server, server_interface, switch, switch_interface ] for table in tables: try: table.create() except Exception: LOG.info(repr(table)) LOG.exception('Exception while creating table.') raise indexes = [ Index('asset_serial_idx', asset.c.serial), Index('asset_mac_idx', asset.c.mac), Index('server_name_idx', server.c.name), Index('server_status_idx', server.c.status), ] for index in indexes: index.create(migrate_engine) f_keys = [ [[rack.c.network_map_id], [network_map.c.id]], [[rack.c.worker_id], [worker.c.id]], [[asset.c.rack_id], [rack.c.id]], [[switch.c.asset_id], [asset.c.id]], [[switch_interface.c.switch_id], [switch.c.id]], [[server.c.asset_id], [asset.c.id]], [[server.c.cluster_id], [cluster.c.id]], [[server.c.sku_id], [sku.c.id]], 
[[server_interface.c.server_id], [server.c.id]], ] for f_key_pair in f_keys: if migrate_engine.name in ('mysql', 'postgresql'): fkey = ForeignKeyConstraint(columns=f_key_pair[0], refcolumns=f_key_pair[1]) fkey.create()