def upgrade(migrate_engine):
    metadata.bind = migrate_engine

    # dummy definitions to satisfy foreign keys
    Table('instance', metadata, autoload=True)
    Table('group', metadata, autoload=True)

    # add the column for the polymorphic identity
    # we have to use 'nullable=True' because the values are
    # null when the column is created
    type_col = Column('type', String(40), nullable=True)
    type_col.create(badge_table)

    # fill column with the right values
    select = badge_table.select().with_only_columns(
        ['id', 'title', 'badge_delegateable', 'badge_delegateable_category'])
    badges_query_result = migrate_engine.execute(select)
    for values in badges_query_result:
        (id_, title, delegateable, category) = values
        if category:
            type_ = CATEGORY_BADGE
        elif delegateable:
            type_ = DELEGATEABLE_BADGE
        else:
            type_ = USER_BADGE
        update = badge_table.update().values(type=type_).where(
            badge_table.c.id == id_)
        migrate_engine.execute(update)

    # drop the old columns
    badge_table.c.badge_delegateable.drop()
    badge_table.c.badge_delegateable_category.drop()
    type_col.alter(nullable=False)
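# --- Context sketch (not from the source) ----------------------------------
# The upgrade above relies on module-level objects it does not define:
# `metadata`, `badge_table`, and the three badge type constants. A minimal,
# assumed reconstruction is sketched below; the exact column types and the
# constant values are guesses for illustration only.
from sqlalchemy import MetaData, Table, Column, Integer, Unicode, Boolean

metadata = MetaData()

# Assumed polymorphic identity values written into the new 'type' column.
USER_BADGE = u'user'
DELEGATEABLE_BADGE = u'delegateable'
CATEGORY_BADGE = u'category'

# Assumed pre-migration layout of the badge table; only the columns the
# migration reads are sketched here.
badge_table = Table(
    'badge', metadata,
    Column('id', Integer, primary_key=True),
    Column('title', Unicode(40), nullable=False),
    Column('badge_delegateable', Boolean, default=False),
    Column('badge_delegateable_category', Boolean, default=False),
)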
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    volumes = Table('volumes', meta, autoload=True)
    attach_string = Column('attachtime_string', String(255))
    attach_string.create(volumes)

    old_attachtime = volumes.c.attach_time

    try:
        volumes_list = list(volumes.select().execute())
        for v in volumes_list:
            attach_time = select([volumes.c.attach_time],
                                 volumes.c.id == v['id']).execute().fetchone()[0]
            volumes.update().\
                where(volumes.c.id == v['id']).\
                values(attachtime_string=attach_time).execute()
    except Exception:
        attach_string.drop()
        raise

    old_attachtime.alter(name='attach_time_old')
    attach_string.alter(name='attach_time')
    old_attachtime.drop()
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    volumes = Table('volumes', meta, autoload=True)
    attach_string = Column('attachtime_string', String(255))
    attach_string.create(volumes)

    old_attachtime = volumes.c.attach_time

    try:
        volumes_list = list(volumes.select().execute())
        for v in volumes_list:
            attach_time = select([volumes.c.attach_time],
                                 volumes.c.id == v['id']).execute().fetchone()[0]
            volumes.update().\
                where(volumes.c.id == v['id']).\
                values(attachtime_string=attach_time).execute()
    except Exception:
        attach_string.drop()
        raise

    old_attachtime.alter(name='attach_time_old')
    attach_string.alter(name='attach_time')
    old_attachtime.drop()
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)

    types = {}
    for instance in migrate_engine.execute(instances.select()):
        if instance.instance_type_id is None:
            types[instance.id] = None
            continue
        try:
            types[instance.id] = int(instance.instance_type_id)
        except ValueError:
            LOG.warn("Instance %s did not have instance_type_id "
                     "converted to an integer because its value is %s" %
                     (instance.id, instance.instance_type_id))
            types[instance.id] = None

    integer_column = Column('instance_type_id_int', Integer(), nullable=True)
    string_column = instances.c.instance_type_id

    integer_column.create(instances)
    for instance_id, instance_type_id in types.iteritems():
        update = instances.update().\
            where(instances.c.id == instance_id).\
            values(instance_type_id_int=instance_type_id)
        migrate_engine.execute(update)

    string_column.alter(name='instance_type_id_str')
    integer_column.alter(name='instance_type_id')
    string_column.drop()
def upgrade(migrate_engine):
    metadata.bind = migrate_engine
    group_table = Table('group', metadata, autoload=True)
    user_table = Table('user', metadata, autoload=True)

    # nullable=False does not work with sqlite even with default=...
    # and .create(populate_default=True). Alter with nullable=True
    # and change afterwards.
    description = Column('description', Unicode(255), default=u'',
                         nullable=True)
    description.create(badge_table, populate_default=True)
    description.alter(nullable=False)
    group_id = Column('group_id', Integer,
                      ForeignKey('group.id', ondelete="CASCADE"))
    group_id.create(badge_table)
    display_group = Column('display_group', Boolean, default=False)
    display_group.create(badge_table)

    q = migrate_engine.execute(badge_table.select())
    for (id, _, _, _, group_id, _, _, _) in q:
        update_statement = badge_table.update(
            badge_table.c.id == id,
            {'group_id': group_id,
             'description': '',
             'display_group': True})
        migrate_engine.execute(update_statement)

    badge_table.c.group.drop()
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)

    integer_column = instances.c.instance_type_id
    string_column = Column('instance_type_id_str',
                           String(length=255, convert_unicode=False,
                                  assert_unicode=None, unicode_error=None,
                                  _warn_on_bytestring=False),
                           nullable=True)

    types = {}
    for instance in migrate_engine.execute(instances.select()):
        if instance.instance_type_id is None:
            types[instance.id] = None
        else:
            types[instance.id] = str(instance.instance_type_id)

    string_column.create(instances)
    for instance_id, instance_type_id in types.iteritems():
        update = instances.update().\
            where(instances.c.id == instance_id).\
            values(instance_type_id_str=instance_type_id)
        migrate_engine.execute(update)

    integer_column.alter(name='instance_type_id_int')
    string_column.alter(name='instance_type_id')
    integer_column.drop()
def downgrade(migrate_engine):
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True,
                      autoload_with=migrate_engine)

    integer_column = instances.c.instance_type_id
    string_column = Column('instance_type_id_str',
                           String(length=255, convert_unicode=False,
                                  assert_unicode=None, unicode_error=None,
                                  _warn_on_bytestring=False),
                           nullable=True)

    types = {}
    for instance in migrate_engine.execute(instances.select()):
        if instance.instance_type_id is None:
            types[instance.id] = None
        else:
            types[instance.id] = str(instance.instance_type_id)

    string_column.create(instances)
    for instance_id, instance_type_id in types.iteritems():
        update = instances.update().\
            where(instances.c.id == instance_id).\
            values(instance_type_id_str=instance_type_id)
        migrate_engine.execute(update)

    integer_column.alter(name='instance_type_id_int')
    string_column.alter(name='instance_type_id')
    integer_column.drop()
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    volumes = Table('volumes', meta, autoload=True)
    attach_datetime = Column('attachtime_datetime', DateTime(timezone=False))
    attach_datetime.create(volumes)

    old_attachtime = volumes.c.attach_time

    try:
        volumes_list = list(volumes.select().execute())
        for v in volumes_list:
            attach_time = select([volumes.c.attach_time],
                                 volumes.c.id == v['id']).execute().fetchone()[0]
            volumes.update().\
                where(volumes.c.id == v['id']).\
                values(attachtime_datetime=attach_time).execute()
    except Exception:
        attach_datetime.drop()
        raise

    old_attachtime.alter(name='attach_time_old')
    attach_datetime.alter(name='attach_time')
    old_attachtime.drop()
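# --- Illustrative variant (not from the source) -----------------------------
# The upgrade above copies each legacy string straight into the new DateTime
# column and relies on the backend to coerce it. If that coercion cannot be
# trusted, the conversion can be done explicitly in Python first; the
# timestamp format below is an assumption about the legacy data, not taken
# from the source.
from datetime import datetime

def _parse_attach_time(value):
    # Returns None for NULLs; otherwise parses the assumed legacy format.
    if value is None:
        return None
    return datetime.strptime(value, '%Y-%m-%d %H:%M:%S')

# Hypothetical usage inside the row loop above:
#     values(attachtime_datetime=_parse_attach_time(attach_time))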
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True,
                      autoload_with=migrate_engine)

    types = {}
    for instance in migrate_engine.execute(instances.select()):
        if instance.instance_type_id is None:
            types[instance.id] = None
            continue
        try:
            types[instance.id] = int(instance.instance_type_id)
        except ValueError:
            logging.warn("Instance %s did not have instance_type_id "
                         "converted to an integer because its value is %s" %
                         (instance.id, instance.instance_type_id))
            types[instance.id] = None

    integer_column = Column('instance_type_id_int', Integer(), nullable=True)
    string_column = instances.c.instance_type_id

    integer_column.create(instances)
    for instance_id, instance_type_id in types.iteritems():
        update = instances.update().\
            where(instances.c.id == instance_id).\
            values(instance_type_id_int=instance_type_id)
        migrate_engine.execute(update)

    string_column.alter(name='instance_type_id_str')
    integer_column.alter(name='instance_type_id')
    string_column.drop()
def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    layer = Table('layer', meta, schema=schema, autoload=True)
    c = Column('timeMode',
               types.Enum('disabled', 'single', 'range', native_enum=False),
               default='disabled')
    c.create(layer, populate_default=True)
    c.alter(nullable=False)
def _update_col(column, table, data_type, columns):
    """
    Update the column based on the database operation.
    :param column: Base column.
    :type column: BaseColumn
    :param columns: Existing column names in the database for the given
    table.
    :type columns: list
    :returns: SQLAlchemy column object.
    :rtype: Column
    """
    from stdm.data.configuration.columns import BoundsColumn

    alchemy_column = Column(column.name, data_type, **_base_col_attrs(column))

    idx_name = None
    if column.index:
        idx_name = u'idx_{0}_{1}'.format(column.entity.name, column.name)

    unique_name = None
    if column.unique:
        unique_name = u'unq_{0}_{1}'.format(column.entity.name, column.name)

    if column.action == DbItem.CREATE:
        # Ensure the column does not exist otherwise an exception will be thrown
        if column.name not in columns:
            alchemy_column.create(table=table,
                                  index_name=idx_name,
                                  unique_name=unique_name)

            # Create check constraints accordingly
            if isinstance(column, BoundsColumn) and \
                    column.can_create_check_constraints():
                # Create check constraint if need be
                chk_const = check_constraint(column, alchemy_column, table)
                if chk_const is not None:
                    chk_const.create()

    elif column.action == DbItem.ALTER:
        # Ensure the column exists before altering
        if column.name in columns:
            col_attrs = _base_col_attrs(column)
            col_attrs['table'] = table
            alchemy_column.alter(**col_attrs)

    elif column.action == DbItem.DROP:
        # Ensure the column exists before dropping
        if column.name in columns:
            _clear_ref_in_entity_relations(column)
            # Use drop cascade command
            drop_cascade_column(column.entity.name, column.name)
            # alchemy_column.drop(table=table)

    # Ensure column is added to the table
    if alchemy_column.table is None:
        alchemy_column._set_parent(table)

    return alchemy_column
def upgrade(migrate_engine): # use sqlalchemy-migrate database connection metadata.bind = migrate_engine # autoload needed tables instance_table = Table("instance", metadata, autoload=True) # add hierachical columns to the table select_child_desc = Column("select_child_description", Unicode(255), default=u"", nullable=True) parent = Column("parent_id", Integer, ForeignKey("badge.id", ondelete="CASCADE"), nullable=True) # create/recreate the table select_child_desc.create(badge_table) select_child_desc.alter(nullable=False) parent.create(badge_table)
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # add recovery_attempts to kube_app table
    kube_app = Table('kube_app', meta,
                     Column('id', Integer, primary_key=True),
                     mysql_engine=ENGINE, mysql_charset=CHARSET,
                     autoload=True)
    col = Column('recovery_attempts', Integer, nullable=True, default=0)
    col.create(kube_app)
    col.alter(nullable=False)
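# --- Note on the pattern above (sketch, not from the source) ----------------
# `col.create(kube_app)` followed directly by `col.alter(nullable=False)`
# only succeeds if the table is empty or every existing row already has a
# value. Other migrations in this set (the badge 'description' and layer
# 'timeMode' columns) pass populate_default=True so sqlalchemy-migrate
# backfills existing rows before the constraint is tightened; applied here
# (names reused from the snippet above, purely illustrative) it would read:
col = Column('recovery_attempts', Integer, nullable=True, default=0)
col.create(kube_app, populate_default=True)  # backfill existing rows with 0
col.alter(nullable=False)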
def upgrade(migrate_engine): metadata = MetaData(bind=migrate_engine) # In order to add a column a nullable=False: # 1) Add the column with nullable=True change_type = Column("change_type", String(50)) change_type.create(Table("rules_scheduled_changes", metadata, autoload=True)) # 2) Update the values of change_type depending on base_data_version migrate_engine.execute( """ UPDATE rules_scheduled_changes SET change_type = "insert" WHERE base_data_version is NULL; """ ) migrate_engine.execute( """ UPDATE rules_scheduled_changes SET change_type = "update" WHERE base_data_version is not NULL; """ ) # 3) Alter the column and set nullable=False change_type.alter(nullable=False) change_type = Column("change_type", String(50)) change_type.create(Table("rules_scheduled_changes_history", metadata, autoload=True)) migrate_engine.execute( """ UPDATE rules_scheduled_changes_history SET change_type = "insert" WHERE base_data_version is NULL; """ ) migrate_engine.execute( """ UPDATE rules_scheduled_changes_history SET change_type = "update" WHERE base_data_version is not NULL; """ ) rules_scheduled_changes = Table("rules_scheduled_changes", metadata, autoload=True) rules_scheduled_changes.c.base_update_type.alter(nullable=True)
def _update_col(column, table, data_type, columns):
    """
    Update the column based on the database operation.
    :param column: Base column.
    :type column: BaseColumn
    :param columns: Existing column names in the database for the given
    table.
    :type columns: list
    :returns: SQLAlchemy column object.
    :rtype: Column
    """
    alchemy_column = Column(column.name, data_type, **_base_col_attrs(column))

    idx_name = None
    if column.index:
        idx_name = u'idx_{0}_{1}'.format(column.entity.name, column.name)

    unique_name = None
    if column.unique:
        unique_name = u'unq_{0}_{1}'.format(column.entity.name, column.name)

    if column.action == DbItem.CREATE:
        # Ensure the column does not exist otherwise an exception will be thrown
        if column.name not in columns:
            alchemy_column.create(table=table,
                                  index_name=idx_name,
                                  unique_name=unique_name)

    elif column.action == DbItem.ALTER:
        # Ensure the column exists before altering
        if column.name in columns:
            col_attrs = _base_col_attrs(column)
            col_attrs['table'] = table
            alchemy_column.alter(**col_attrs)

    elif column.action == DbItem.DROP:
        # Ensure the column exists before dropping
        if column.name in columns:
            _clear_ref_in_entity_relations(column)
            alchemy_column.drop(table=table)

    # Ensure column is added to the table
    if alchemy_column.table is None:
        alchemy_column._set_parent(table)

    return alchemy_column
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    instance_types = Table('instance_types', meta, autoload=True)

    integer_column = Column('flavorid_int', Integer())
    integer_column.create(instance_types)

    try:
        # NOTE(bcwaldon): This catches a bug with python-migrate
        # failing to add the unique constraint
        try:
            migrate.UniqueConstraint(integer_column).create()
        except migrate.changeset.NotSupportedError:
            LOG.info("Failed to add unique constraint on flavorid")
            pass

        string_column = instance_types.c.flavorid

        instance_types_rows = list(instance_types.select().execute())
        for instance_type in instance_types_rows:
            flavorid_str = instance_type.flavorid
            try:
                flavorid_int = int(instance_type.flavorid)
            except ValueError:
                msg = _('Could not cast flavorid to integer: %s. '
                        'Set flavorid to an integer-like string to downgrade.')
                LOG.error(msg % instance_type.flavorid)
                raise

            instance_types.update()\
                          .where(string_column == flavorid_str)\
                          .values(flavorid_int=flavorid_int)\
                          .execute()
    except Exception:
        integer_column.drop()
        raise

    string_column.alter(name='flavorid_str')
    integer_column.alter(name='flavorid')
    string_column.drop()
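# --- Isolated sketch of the constraint workaround above (assumed helper) ----
# Both flavorid migrations wrap migrate.UniqueConstraint(...).create() in a
# try/except because some backends cannot add a constraint to an existing
# column in place. The same pattern pulled out into a standalone helper; the
# function name and the comment about SQLite are illustrative, not from the
# source.
import migrate
from sqlalchemy import MetaData, Table

def _add_unique_flavorid(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    instance_types = Table('instance_types', meta, autoload=True)
    try:
        migrate.UniqueConstraint(instance_types.c.flavorid).create()
    except migrate.changeset.NotSupportedError:
        # e.g. SQLite: ALTER TABLE cannot add a unique constraint in place
        pass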
def upgrade(migrate_engine): metadata = MetaData(bind=migrate_engine) # In order to add a column a nullable=False: # 1) Add the column with nullable=True change_type = Column("change_type", String(50)) change_type.create( Table("rules_scheduled_changes", metadata, autoload=True)) # 2) Update the values of change_type depending on base_data_version migrate_engine.execute(""" UPDATE rules_scheduled_changes SET change_type = "insert" WHERE base_data_version is NULL; """) migrate_engine.execute(""" UPDATE rules_scheduled_changes SET change_type = "update" WHERE base_data_version is not NULL; """) # 3) Alter the column and set nullable=False change_type.alter(nullable=False) change_type = Column("change_type", String(50)) change_type.create( Table("rules_scheduled_changes_history", metadata, autoload=True)) migrate_engine.execute(""" UPDATE rules_scheduled_changes_history SET change_type = "insert" WHERE base_data_version is NULL; """) migrate_engine.execute(""" UPDATE rules_scheduled_changes_history SET change_type = "update" WHERE base_data_version is not NULL; """) rules_scheduled_changes = Table("rules_scheduled_changes", metadata, autoload=True) rules_scheduled_changes.c.base_update_type.alter(nullable=True)
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    instance_types = Table('instance_types', meta, autoload=True)

    string_column = Column('flavorid_str', String(255))
    string_column.create(instance_types)

    try:
        # NOTE(bcwaldon): This catches a bug with python-migrate
        # failing to add the unique constraint
        try:
            migrate.UniqueConstraint(string_column).create()
        except migrate.changeset.NotSupportedError:
            LOG.error("Failed to add unique constraint on flavorid")
            pass

        # NOTE(bcwaldon): this is a hack to preserve uniqueness constraint
        # on existing 'name' column
        try:
            migrate.UniqueConstraint(instance_types.c.name).create()
        except Exception:
            pass

        integer_column = instance_types.c.flavorid

        instance_type_rows = list(instance_types.select().execute())
        for instance_type in instance_type_rows:
            flavorid_int = instance_type.flavorid
            instance_types.update()\
                          .where(integer_column == flavorid_int)\
                          .values(flavorid_str=str(flavorid_int))\
                          .execute()
    except Exception:
        string_column.drop()
        raise

    integer_column.alter(name='flavorid_int')
    string_column.alter(name='flavorid')
    integer_column.drop()
def _update_col(column, table, data_type, columns):
    """
    Update the column based on the database operation.
    :param column: Base column.
    :type column: BaseColumn
    :param columns: Existing column names in the database for the given
    table.
    :type columns: list
    :returns: SQLAlchemy column object.
    :rtype: Column
    """
    from stdm.data.configuration.columns import BoundsColumn

    alchemy_column = Column(column.name, data_type, **_base_col_attrs(column))

    idx_name = None
    if column.index:
        idx_name = u'idx_{0}_{1}'.format(column.entity.name, column.name)

    unique_name = None
    if column.unique:
        unique_name = u'unq_{0}_{1}'.format(column.entity.name, column.name)

    if column.action == DbItem.CREATE:
        # Ensure the column does not exist otherwise an exception will be thrown
        if column.name not in columns:
            alchemy_column.create(table=table, unique_name=unique_name)

            # Create check constraints accordingly
            if isinstance(column, BoundsColumn) and \
                    column.can_create_check_constraints():
                # Create check constraint if need be
                chk_const = check_constraint(column, alchemy_column, table)
                if chk_const is not None:
                    chk_const.create()

    elif column.action == DbItem.ALTER:
        # Ensure the column exists before altering
        if column.name in columns:
            col_attrs = _base_col_attrs(column)
            col_attrs['table'] = table
            alchemy_column.alter(**col_attrs)

    elif column.action == DbItem.DROP:
        # Ensure the column exists before dropping
        if column.name in columns:
            _clear_ref_in_entity_relations(column)
            # Use drop cascade command
            drop_cascade_column(column.entity.name, column.name)

    # Ensure column is added to the table
    if alchemy_column.table is None:
        alchemy_column._set_parent(table)

    # Add the appropriate kind of index for columns flagged with one
    if column.index:
        _bind_metadata(metadata)
        inspector = reflection.Inspector.from_engine(metadata.bind)
        indexes_list = inspector.get_indexes(column.entity.name)
        indexes = [i['name'] for i in indexes_list if not i['unique']]
        # get_indexes() does not report gist indexes, so a try/except
        # is needed here.
        try:
            if idx_name not in indexes:
                if column.TYPE_INFO == 'GEOMETRY':
                    idx = Index(idx_name, alchemy_column,
                                postgresql_using='gist')
                else:
                    idx = Index(idx_name, alchemy_column,
                                postgresql_using='btree')
                idx.create()
        except Exception:
            pass

    return alchemy_column