def drop_index(index, table, column_name=None, metadata=None):
    """Best-effort drop of an index; failures are logged, never raised.

    :param index: Index to drop
    :type index: :class:`Index` or str
    :param table: Table to drop the index from
    :type table: :class:`Table` or str
    :param column_name: Column the index covers; used to rebuild the
        :class:`Index` object when ``index`` is given as a name (str)
    :type column_name: str
    :param metadata: Needed only if ``table`` is a table name
    :type metadata: :class:`Metadata`
    """
    try:
        if not isinstance(index, Index):
            if not isinstance(table, Table):
                # A table name was passed; reflect it into a Table object.
                assert metadata is not None
                table = Table(table, metadata, autoload=True)
            # Apply the same name truncation used at creation time so the
            # lookup matches what the backend actually stored.
            index_name = truncate_index_name(index, table.metadata.bind)
            if index_name in [ix.name for ix in table.indexes]:
                index = Index(index_name, table.c[column_name])
            else:
                # Nothing to drop — treat a missing index as a no-op.
                log.debug("Index '%s' in table '%s' does not exist.", index, table)
                return
        index.drop()
    except Exception:
        # Deliberate broad catch: a failed drop should not abort the caller.
        log.exception("Dropping index '%s' from table '%s' failed", index, table)
def downgrade(migrate_engine):
    """Remove reverse_name support: drop its indexes and columns, then
    restore the unique constraints on SQLite."""
    meta.bind = migrate_engine

    # Domains table: drop the (reverse_name, deleted) index.
    domains = Table("domains", meta, autoload=True)
    Index("reverse_name_deleted",
          domains.c.reverse_name,
          domains.c.deleted).drop(migrate_engine)

    # Recordsets table: drop the (reverse_name, domain_id) index.
    recordsets = Table("recordsets", meta, autoload=True)
    Index("reverse_name_dom_id",
          recordsets.c.reverse_name,
          recordsets.c.domain_id).drop(migrate_engine)

    # Now remove the columns themselves.
    domains.c.reverse_name.drop()
    recordsets.c.reverse_name.drop()

    # Recreate constraints for the SQLite dialect.
    if migrate_engine.url.get_dialect().name.startswith("sqlite"):
        UniqueConstraint("name", "deleted",
                         name="unique_domain_name",
                         table=domains).create()
        UniqueConstraint("domain_id", "name", "type",
                         name="unique_recordset",
                         table=recordsets).create()
def upgrade(migrate_engine):
    """Drop the rec_name_index index from the records table."""
    meta.bind = migrate_engine
    records = Table('records', meta, autoload=True)
    Index('rec_name_index', records.c.name).drop()
def upgrade(migrate_engine):
    """Split the ambiguous 'qual' datatype in history_dataset_association:
    rows whose peek starts with '>' become 'qual454', all others become
    'qualsolexa'. A temporary index on extension speeds up the updates.
    """
    display_migration_details()
    metadata.bind = migrate_engine
    db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) )
    HistoryDatasetAssociation_table = Table( "history_dataset_association", metadata, autoload=True )
    # Load existing tables
    metadata.reflect()
    # Create a temporary index on extension to speed up the UPDATEs below
    # (the original comment mis-stated this as indexes on galaxy_user).
    i = Index( 'ix_hda_extension', HistoryDatasetAssociation_table.c.extension )
    try:
        i.create()
    except Exception as e:
        # Best-effort: the UPDATEs still work (slower) without the index.
        log.debug( "Adding index 'ix_hda_extension' to history_dataset_association table failed: %s" % ( str( e ) ) )
    # Reassign 'qual' rows: FASTA-style peek ('>...') means 454 quality data.
    cmd = "UPDATE history_dataset_association SET extension = 'qual454' WHERE extension = 'qual' and peek like \'>%%\'"
    try:
        db_session.execute( cmd )
    except Exception as e:
        log.debug( "Resetting extension qual to qual454 in history_dataset_association failed: %s" % ( str( e ) ) )
    # Remaining 'qual' rows are treated as Solexa quality data.
    cmd = "UPDATE history_dataset_association SET extension = 'qualsolexa' WHERE extension = 'qual' and peek not like \'>%%\'"
    try:
        db_session.execute( cmd )
    except Exception as e:
        log.debug( "Resetting extension qual to qualsolexa in history_dataset_association failed: %s" % ( str( e ) ) )
    # The index was only needed for the migration; drop it again.
    try:
        i.drop()
    except Exception as e:
        log.debug( "Dropping index 'ix_hda_extension' to history_dataset_association table failed: %s" % ( str( e ) ) )
def downgrade(migrate_engine):
    """Drop the key index from the aggregate_metadata table."""
    meta = MetaData(bind=migrate_engine)
    aggregate_metadata = Table("aggregate_metadata", meta, autoload=True)
    Index("aggregate_metadata_key_idx",
          aggregate_metadata.c.key).drop(migrate_engine)
def downgrade(migrate_engine):
    """Drop the composite (user_id, name) index from key_pairs."""
    meta = MetaData(bind=migrate_engine)
    key_pairs = Table('key_pairs', meta, autoload=True)
    Index('key_pair_user_id_name_idx',
          key_pairs.c.user_id,
          key_pairs.c.name).drop(migrate_engine)
def downgrade(migrate_engine):
    """Undo the reverse_name migration: indexes, columns, SQLite constraints."""
    meta.bind = migrate_engine

    domains_table = Table('domains', meta, autoload=True)
    rsets_table = Table('recordsets', meta, autoload=True)

    # Drop both reverse_name indexes first.
    Index('reverse_name_deleted',
          domains_table.c.reverse_name,
          domains_table.c.deleted).drop(migrate_engine)
    Index('reverse_name_dom_id',
          rsets_table.c.reverse_name,
          rsets_table.c.domain_id).drop(migrate_engine)

    # Then drop the columns.
    domains_table.c.reverse_name.drop()
    rsets_table.c.reverse_name.drop()

    # Recreate constraints for SQLite.
    if migrate_engine.url.get_dialect().name.startswith('sqlite'):
        UniqueConstraint('name', 'deleted',
                         name='unique_domain_name',
                         table=domains_table).create()
        UniqueConstraint('domain_id', 'name', 'type',
                         name='unique_recordset',
                         table=rsets_table).create()
def upgrade(migrate_engine):
    """Split the ambiguous 'qual' datatype in history_dataset_association:
    rows whose peek starts with '>' become 'qual454', the rest become
    'qualsolexa'. A temporary index on extension speeds up the updates.
    """
    print(__doc__)
    metadata.bind = migrate_engine
    HistoryDatasetAssociation_table = Table("history_dataset_association", metadata, autoload=True)
    # Load existing tables
    metadata.reflect()
    # Create a temporary index on extension to speed up the UPDATEs below
    # (the original comment mis-stated this as indexes on galaxy_user).
    i = Index('ix_hda_extension', HistoryDatasetAssociation_table.c.extension)
    try:
        i.create()
    except Exception:
        # Best-effort: the UPDATEs still work (slower) without the index.
        log.exception("Adding index 'ix_hda_extension' to history_dataset_association table failed.")
    # Reassign 'qual' rows: FASTA-style peek ('>...') means 454 quality data.
    cmd = "UPDATE history_dataset_association SET extension = 'qual454' WHERE extension = 'qual' and peek like \'>%%\'"
    try:
        migrate_engine.execute(cmd)
    except Exception:
        log.exception("Resetting extension qual to qual454 in history_dataset_association failed.")
    # Remaining 'qual' rows are treated as Solexa quality data.
    cmd = "UPDATE history_dataset_association SET extension = 'qualsolexa' WHERE extension = 'qual' and peek not like \'>%%\'"
    try:
        migrate_engine.execute(cmd)
    except Exception:
        log.exception("Resetting extension qual to qualsolexa in history_dataset_association failed.")
    # The index was only needed for the migration; drop it again.
    try:
        i.drop()
    except Exception:
        log.exception("Dropping index 'ix_hda_extension' to history_dataset_association table failed.")
def downgrade(migrate_engine):
    """Drop the (domain, deleted) index from dns_domains."""
    meta = MetaData(bind=migrate_engine)
    dns_domains = Table('dns_domains', meta, autoload=True)
    Index('dns_domains_domain_deleted_idx',
          dns_domains.c.domain,
          dns_domains.c.deleted).drop(migrate_engine)
def downgrade(migrate_engine):
    """Remove the key index from aggregate_metadata."""
    meta = MetaData()
    meta.bind = migrate_engine
    agg_metadata = Table('aggregate_metadata', meta, autoload=True)
    key_index = Index('aggregate_metadata_key_idx', agg_metadata.c.key)
    key_index.drop(migrate_engine)
def upgrade(migrate_engine):
    """Remove the rec_name_index index from records."""
    meta.bind = migrate_engine
    records = Table("records", meta, autoload=True)
    idx = Index("rec_name_index", records.c.name)
    idx.drop()
def downgrade(migrate_engine):
    """Revert the upgrade: drop job.library_folder_id, the
    job_to_output_library_dataset table, and the ix_dataset_state index.
    All steps are best-effort: failures are logged, not raised."""
    metadata.bind = migrate_engine
    metadata.reflect()
    # Drop the library_folder_id column
    try:
        Job_table = Table( "job", metadata, autoload=True )
    except NoSuchTableError:
        Job_table = None
        log.debug( "Failed loading table job" )
    if Job_table is not None:
        try:
            col = Job_table.c.library_folder_id
            col.drop()
        except Exception as e:
            log.debug( "Dropping column 'library_folder_id' from job table failed: %s" % ( str( e ) ) )
    # Drop the job_to_output_library_dataset table
    try:
        JobToOutputLibraryDatasetAssociation_table.drop()
    except Exception as e:
        print(str(e))
        log.debug( "Dropping job_to_output_library_dataset table failed: %s" % str( e ) )
    # Drop the ix_dataset_state index
    try:
        Dataset_table = Table( "dataset", metadata, autoload=True )
    except NoSuchTableError:
        Dataset_table = None
        log.debug( "Failed loading table dataset" )
    # Bug fix: only touch the index when the table loaded. Previously
    # Dataset_table.c.state was dereferenced unconditionally, raising
    # AttributeError when Dataset_table was None instead of logging.
    if Dataset_table is not None:
        i = Index( "ix_dataset_state", Dataset_table.c.state )
        try:
            i.drop()
        except Exception as e:
            print(str(e))
            log.debug( "Dropping index 'ix_dataset_state' from dataset table failed: %s" % str( e ) )
def downgrade(migrate_engine):
    """Revert the upgrade: drop job.library_folder_id, the
    job_to_output_library_dataset table, and the ix_dataset_state index.
    All steps are best-effort: failures are logged, not raised."""
    metadata.bind = migrate_engine
    metadata.reflect()
    # Drop the library_folder_id column
    try:
        Job_table = Table( "job", metadata, autoload=True )
    except NoSuchTableError:
        Job_table = None
        log.debug( "Failed loading table job" )
    if Job_table is not None:
        try:
            col = Job_table.c.library_folder_id
            col.drop()
        except Exception:
            log.exception("Dropping column 'library_folder_id' from job table failed.")
    # Drop the job_to_output_library_dataset table
    try:
        JobToOutputLibraryDatasetAssociation_table.drop()
    except Exception:
        log.exception("Dropping job_to_output_library_dataset table failed.")
    # Drop the ix_dataset_state index
    try:
        Dataset_table = Table( "dataset", metadata, autoload=True )
    except NoSuchTableError:
        Dataset_table = None
        log.debug( "Failed loading table dataset" )
    # Bug fix: only touch the index when the table loaded. Previously
    # Dataset_table.c.state was dereferenced unconditionally, raising
    # AttributeError when Dataset_table was None instead of logging.
    if Dataset_table is not None:
        i = Index( "ix_dataset_state", Dataset_table.c.state )
        try:
            i.drop()
        except Exception:
            log.exception("Dropping index 'ix_dataset_state' from dataset table failed.")
def downgrade(migrate_engine):
    """Drop the designate_recordset_id index from records."""
    meta.bind = migrate_engine
    records = Table('records', meta, autoload=True)
    Index('designate_recordset_id',
          records.c.designate_recordset_id).drop()
def downgrade(migrate_engine):
    """Drop the (uuid, start_period) index from bw_usage_cache."""
    meta = MetaData(bind=migrate_engine)
    cache_table = Table('bw_usage_cache', meta, autoload=True)
    Index('bw_usage_cache_uuid_start_period_idx',
          cache_table.c.uuid,
          cache_table.c.start_period).drop(migrate_engine)
def downgrade(migrate_engine):
    """Drop the (hypervisor, os, architecture) index from agent_builds."""
    meta = MetaData(bind=migrate_engine)
    builds = Table('agent_builds', meta, autoload=True)
    Index('agent_builds_hypervisor_os_arch_idx',
          builds.c.hypervisor,
          builds.c.os,
          builds.c.architecture).drop(migrate_engine)
def downgrade(migrate_engine):
    """Remove the next_run index from schedules."""
    meta = MetaData(bind=migrate_engine)
    schedules = Table('schedules', meta, autoload=True)
    Index(INDEX_NAME, schedules.c.next_run).drop(migrate_engine)
def downgrade(migrate_engine):
    """Drop the (instance_type_id, key) index from instance_type_extra_specs."""
    meta = MetaData(bind=migrate_engine)
    specs = Table('instance_type_extra_specs', meta, autoload=True)
    Index('instance_type_extra_specs_instance_type_id_key_idx',
          specs.c.instance_type_id,
          specs.c.key).drop(migrate_engine)
def downgrade(migrate_engine):
    """Remove the uuid index from reservations."""
    meta = MetaData(bind=migrate_engine)
    reservations = Table('reservations', meta, autoload=True)
    Index('reservations_uuid_idx',
          reservations.c.uuid).drop(migrate_engine)
def downgrade(migrate_engine):
    """Drop the uploader_id index, then the column, from upload."""
    meta = MetaData()
    meta.bind = migrate_engine
    upload = Table("upload", meta, autoload=True)
    # The index must go before the column it covers.
    Index("idx_upload_uploader_id", upload.c.uploader_id).drop(migrate_engine)
    upload.c.uploader_id.drop()
def downgrade(migrate_engine):
    """Drop the (deleted, instance_uuid, status) index from migrations."""
    meta = MetaData(bind=migrate_engine)
    migrations = Table('migrations', meta, autoload=True)
    Index('migrations_instance_uuid_and_status_idx',
          migrations.c.deleted,
          migrations.c.instance_uuid,
          migrations.c.status).drop(migrate_engine)
def downgrade(migrate_engine):
    """Remove the hard_timeout index from jobs."""
    meta = MetaData(bind=migrate_engine)
    jobs = Table('jobs', meta, autoload=True)
    Index(INDEX_NAME, jobs.c.hard_timeout).drop(migrate_engine)
def downgrade(migrate_engine):
    """Drop the (instance_uuid, deleted, created_at) index from instance_faults."""
    meta = MetaData(bind=migrate_engine)
    faults = Table('instance_faults', meta, autoload=True)
    Index('instance_faults_instance_uuid_deleted_created_at_idx',
          faults.c.instance_uuid,
          faults.c.deleted,
          faults.c.created_at).drop(migrate_engine)
def drop_index(table, index_name, col_name):
    """Best-effort drop of the named index on one column of *table*.

    :param table: Table object whose index should be removed
    :param index_name: name of the index to drop
    :param col_name: name of the indexed column on the table
    Failures are printed (with traceback), never raised.
    """
    try:
        i = Index(index_name, getattr(table.c, col_name))
        # Fixed: Python-2 print statements replaced with the function form.
        print('dropping index %s' % i)
        i.drop(bind=table.metadata.bind)
    except Exception:
        # Fixed: bare `except:` narrowed to Exception so SystemExit and
        # KeyboardInterrupt still propagate; the drop stays best-effort.
        print(traceback.format_exc())
def downgrade(migrate_engine):
    """Drop the site_id index, then the column, from user."""
    meta = MetaData(bind=migrate_engine)
    user = Table("user", meta, autoload=True)
    # NOTE(review): 'idx_user_sire_id' looks like a typo for
    # 'idx_user_site_id', but the name must match what the upgrade
    # created in the database — confirm before renaming.
    Index("idx_user_sire_id", user.c.site_id).drop(migrate_engine)
    user.c.site_id.drop()
def downgrade(migrate_engine):
    """Remove the owner index from images."""
    meta = MetaData(bind=migrate_engine)
    images = Table('images', meta, autoload=True)
    Index(INDEX_NAME, images.c.owner).drop(migrate_engine)
def downgrade(migrate_engine):
    """Drop the linked_minimum_patient_identifier_source_id index."""
    meta.bind = migrate_engine
    pis = Table("participant_identifier_source", meta, autoload=True)
    Index('idx__pis__linked_minimum_patient_identifier_source_id',
          pis.c.linked_minimum_patient_identifier_source_id).drop(migrate_engine)
def downgrade(migrate_engine):
    """Drop the user_id index, then the column, from batch."""
    meta = MetaData(bind=migrate_engine)
    batch = Table("batch", meta, autoload=True)
    # The index must go before the column it covers.
    Index("idx_batch_user_id", batch.c.user_id).drop(migrate_engine)
    batch.c.user_id.drop()
def downgrade(migrate_engine):
    """Best-effort drop of the history_dataset_association_id index on
    history_dataset_association_tag_association."""
    metadata.bind = migrate_engine
    metadata.reflect()
    i = Index("ix_hda_ta_history_dataset_association_id", HistoryDatasetAssociationTagAssociation_table.c.history_dataset_association_id)
    try:
        i.drop()
    except Exception:
        # Fixed: the message previously named a different index
        # ('ix_hdata_...') and said "to table" for a removal; it now
        # matches the index actually being dropped.
        log.exception("Removing index 'ix_hda_ta_history_dataset_association_id' from table 'history_dataset_association_tag_association' failed.")
def downgrade(migrate_engine):
    """Drop the (host, node, deleted) index from instances."""
    meta = MetaData(bind=migrate_engine)
    instances = Table('instances', meta, autoload=True)
    Index(INDEX_NAME,
          instances.c.host,
          instances.c.node,
          instances.c.deleted).drop(migrate_engine)
def downgrade(migrate_engine):
    """Drop the by-host-and-status index from migrations."""
    meta = MetaData(bind=migrate_engine)
    migrations = Table('migrations', meta, autoload=True)
    Index('migrations_by_host_and_status_idx',
          migrations.c.deleted,
          migrations.c.source_compute,
          migrations.c.dest_compute,
          migrations.c.status).drop(migrate_engine)
def downgrade(migrate_engine):
    """Drop the (address, deleted, allocated) index from fixed_ips."""
    meta = MetaData(bind=migrate_engine)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    Index('fixed_ips_deleted_allocated_idx',
          fixed_ips.c.address,
          fixed_ips.c.deleted,
          fixed_ips.c.allocated).drop(migrate_engine)
def downgrade(migrate_engine):
    """Best-effort drop of the history_dataset_association_id index on
    history_dataset_association_tag_association."""
    metadata.bind = migrate_engine
    metadata.reflect()
    i = Index( "ix_hda_ta_history_dataset_association_id", HistoryDatasetAssociationTagAssociation_table.c.history_dataset_association_id )
    try:
        i.drop()
    except Exception as e:
        print(str(e))
        # Fixed: the message previously named a different index
        # ('ix_hdata_...') and said "to table" for a removal; it now
        # matches the index actually being dropped.
        log.debug( "Removing index 'ix_hda_ta_history_dataset_association_id' from table 'history_dataset_association_tag_association' failed: %s" % str( e ) )
def upgrade(migrate_engine):
    """Drop every index listed in INDEXES from its owning table."""
    meta = MetaData(bind=migrate_engine)
    # Reflect each table once, then drop its listed indexes.
    for table_name, indexes in INDEXES.items():
        table = Table(table_name, meta, autoload=True)
        for index_name, column in indexes:
            Index(index_name, table.c[column]).drop()
def downgrade(migrate_engine):
    """Drop the instance_uuid index from each affected table."""
    meta = MetaData(bind=migrate_engine)
    for table_name in ('instance_metadata',
                       'security_group_instance_association'):
        tbl = Table(table_name, meta, autoload=True)
        Index('%s_instance_uuid_idx' % table_name,
              tbl.c.instance_uuid).drop(migrate_engine)
def _check_field_index(conn, fields, name_prefix, filter_expression,
                       should_exist=True, concurrently=False,
                       replace_existing=False, index_type=None):
    """
    Check the status of a given index: add or remove it as needed.

    :param conn: open database connection used for all DDL
    :param fields: fields the index covers (must be non-empty)
    :param name_prefix: prefix baked into the generated index name
    :param filter_expression: partial-index WHERE clause (postgresql_where)
    :param should_exist: desired end state — create if missing when True
    :param concurrently: build without locking the table (PostgreSQL)
    :param replace_existing: drop any existing index first, then recreate
    :param index_type: postgres index method; required for composite
        indexes, otherwise taken from the single field's
        ``postgres_index_type``
    :raises ValueError: composite index requested without an index_type
    """
    if index_type is None:
        if len(fields) > 1:
            raise ValueError('Must specify index type for composite indexes.')
        index_type = fields[0].postgres_index_type

    field_name = '_'.join([f.name.lower() for f in fields])
    # Our normal indexes start with "ix_", dynamic indexes with "dix_"
    index_name = 'dix_{prefix}_{field_name}'.format(
        prefix=name_prefix.lower(),
        field_name=field_name)
    # Previous naming scheme — still checked/dropped for upgrades from
    # databases created under the old name.
    legacy_name = 'dix_field_{prefix}_dataset_{field_name}'.format(
        prefix=name_prefix.lower(),
        field_name=field_name,
    )
    indexed_expressions = [f.alchemy_expression for f in fields]
    index = Index(
        index_name,
        *indexed_expressions,
        postgresql_where=filter_expression,
        postgresql_using=index_type,
        # Don't lock the table (in the future we'll allow indexing new fields...)
        postgresql_concurrently=concurrently)
    exists = _pg_exists(conn, tables.schema_qualified(index_name))
    legacy_exists = _pg_exists(conn, tables.schema_qualified(legacy_name))

    # This currently leaves a window of time without indexes: it's primarily intended for development.
    if replace_existing or (not should_exist):
        if exists:
            _LOG.debug('Dropping index: %s (replace=%r)', index_name, replace_existing)
            index.drop(conn)
            exists = False
        if legacy_exists:
            _LOG.debug('Dropping legacy index: %s (replace=%r)', legacy_name, replace_existing)
            # Legacy index was created without the partial/using options,
            # so rebuild a plain Index object just to drop it by name.
            Index(legacy_name, *indexed_expressions).drop(conn)
            legacy_exists = False

    if should_exist:
        # Either naming scheme satisfies "exists" — don't create a duplicate.
        if not (exists or legacy_exists):
            _LOG.info('Creating index: %s', index_name)
            index.create(conn)
        else:
            _LOG.debug('Index exists: %s (replace=%r)', index_name, replace_existing)
def upgrade(migrate_engine):
    """Replace the old single-column unique constraint with the new
    multi-column one."""
    utils.drop_unique_constraint(migrate_engine, TABLE_NAME, OLD_UC_NAME,
                                 OLD_COLUMN)
    meta = MetaData(bind=migrate_engine)
    table = Table(TABLE_NAME, meta, autoload=True)
    # On MySQL a unique index named after the old column is also dropped.
    if migrate_engine.name == "mysql":
        Index(OLD_COLUMN, table.c[OLD_COLUMN], unique=True).drop()
    UniqueConstraint(*COLUMNS, table=table, name=UC_NAME).create()
def downgrade(migrate_engine):
    """Drop the hda-id index from
    history_dataset_association_display_at_authorization (MySQL only)."""
    metadata.bind = migrate_engine
    # Only MySQL carries this index; nothing to do elsewhere.
    if migrate_engine.name != 'mysql':
        return
    # Load existing tables
    metadata.reflect()
    index = Index("ix_hdadaa_history_dataset_association_id",
                  HistoryDatasetAssociationDisplayAtAuthorization_table.c.history_dataset_association_id)
    try:
        index.drop()
    except Exception:
        log.exception("Removing index 'ix_hdadaa_history_dataset_association_id' from table 'history_dataset_association_display_at_authorization' table failed.")
def downgrade(migrate_engine):
    """Drop the instance_uuid index from each affected table."""
    meta = MetaData(bind=migrate_engine)
    for table_name in ('block_device_mapping', 'consoles', 'volumes'):
        tbl = Table(table_name, meta, autoload=True)
        Index('%s_instance_uuid_idx' % table_name,
              tbl.c.instance_uuid).drop(migrate_engine)
def downgrade(migrate_engine):
    """Drop the status and timeout indexes from jobs."""
    meta = MetaData(bind=migrate_engine)
    jobs = Table("jobs", meta, autoload=True)
    # Drop both indexes in the original order: status first, then timeout.
    for index_name, column in ((INDEX_STATUS_NAME, jobs.c.status),
                               (INDEX_TIMEOUT_NAME, jobs.c.timeout)):
        Index(index_name, column).drop(migrate_engine)
def downgrade(migrate_engine):
    """Drop both host-related indexes from iscsi_targets."""
    meta = MetaData(bind=migrate_engine)
    targets = Table('iscsi_targets', meta, autoload=True)
    Index('iscsi_targets_host_idx', targets.c.host).drop(migrate_engine)
    Index('iscsi_targets_host_volume_id_deleted_idx',
          targets.c.host,
          targets.c.volume_id,
          targets.c.deleted).drop(migrate_engine)
def _drop_new_index(migrations, migrate_engine):
    """Drop migrations_by_host_nodes_and_status_idx, using raw SQL on MySQL
    and the SQLAlchemy Index API everywhere else."""
    if migrate_engine.name == "mysql":
        migrate_engine.execute(
            "drop index migrations_by_host_nodes_and_status_idx on "
            "migrations")
        return
    index = Index('migrations_by_host_nodes_and_status_idx',
                  migrations.c.deleted,
                  migrations.c.source_compute,
                  migrations.c.dest_compute,
                  migrations.c.source_node,
                  migrations.c.dest_node,
                  migrations.c.status)
    index.drop(migrate_engine)