def downgrade():
    """Revert Text columns back to String on the event-log storage tables."""
    # (table, columns) pairs whose types are reverted from Text to String,
    # in the same order the original migration applied them.
    reverted = [
        ("event_logs", ("step_key", "asset_key", "partition")),
        ("secondary_indexes", ("name",)),
        ("asset_keys", ("asset_key",)),
    ]
    for table, columns in reverted:
        if has_table(table):
            with op.batch_alter_table(table) as batch:
                for column in columns:
                    batch.alter_column(column, type_=sa.String, existing_type=sa.Text)
def upgrade():
    """Create the snapshots table and link runs to it via a snapshot_id FK column."""
    # Nothing to do when the runs table has not been created yet.
    if not has_table("runs"):
        return
    if not has_table("snapshots"):
        op.create_table(
            "snapshots",
            sa.Column("id", sa.Integer, primary_key=True, autoincrement=True, nullable=False),
            sa.Column("snapshot_id", sa.String(255), unique=True, nullable=False),
            sa.Column("snapshot_body", sa.LargeBinary, nullable=False),
            sa.Column("snapshot_type", sa.String(63), nullable=False),
        )
    # Add the FK column only once; reruns of the migration are no-ops.
    if not has_column("runs", "snapshot_id"):
        op.add_column(
            "runs",
            sa.Column("snapshot_id", sa.String(255), sa.ForeignKey("snapshots.snapshot_id")),
        )
def downgrade():
    """Revert Text columns back to String on the run storage tables."""
    # Column order per table mirrors the original migration exactly.
    reverted = [
        ("runs", ("pipeline_name", "run_body", "partition", "partition_set")),
        ("secondary_indexes", ("name",)),
        ("run_tags", ("key", "value")),
    ]
    for table, columns in reverted:
        if has_table(table):
            with op.batch_alter_table(table) as batch:
                for column in columns:
                    batch.alter_column(column, type_=sa.String, existing_type=sa.Text)
def downgrade():
    """Remove the snapshots table and the runs.snapshot_id foreign-key column.

    Fix: the original dropped the ``snapshots`` table BEFORE dropping the
    ``runs.snapshot_id`` column that references it, which can fail on
    backends that enforce foreign keys. The sibling snapshot downgrades in
    this package drop the column first, then the table — this now matches
    that ordering while still dropping ``snapshots`` even when ``runs`` is
    absent (as the original did).
    """
    # Drop the referencing FK column first.
    if has_table("runs") and has_column("runs", "snapshot_id"):
        op.drop_column("runs", "snapshot_id")
    # Now the referenced table can be dropped safely.
    if has_table("snapshots"):
        op.drop_table("snapshots")
def upgrade():
    """Wipe any configured scheduler state and drop the legacy schedule tables."""
    inst = get_currently_upgrading_instance()
    # Clear scheduler-managed state before removing its backing tables.
    if inst.scheduler:
        inst.scheduler.wipe(inst)
    for legacy_table in ("schedules", "schedule_ticks"):
        if has_table(legacy_table):
            op.drop_table(legacy_table)
def downgrade():
    """Revert the jobs/job_ticks body columns from Text back to String."""
    for table, column in (("jobs", "job_body"), ("job_ticks", "tick_body")):
        if has_table(table):
            with op.batch_alter_table(table) as batch:
                batch.alter_column(column, type_=sa.String, existing_type=sa.Text)
def upgrade():
    """Widen unique-indexed text columns to the MySQL-compatible unique text type."""
    # Both columns carry unique indexes, so they need the special
    # MySQL-compatible unique text type rather than plain Text.
    targets = (("secondary_indexes", "name"), ("asset_keys", "asset_key"))
    for table, column in targets:
        if has_table(table):
            with op.batch_alter_table(table) as batch:
                batch.alter_column(
                    column,
                    type_=MySQLCompatabilityTypes.UniqueText,
                    existing_type=sa.Text,
                )
def downgrade():
    """Drop the runs.snapshot_id column (via batch mode) and the snapshots table."""
    if has_column("runs", "snapshot_id"):
        # Batch mode presumably for sqlite's limited ALTER TABLE support —
        # the column drop happens via a table rebuild.
        with op.batch_alter_table("runs") as runs_batch:
            runs_batch.drop_column("snapshot_id")
    if has_table("snapshots"):
        op.drop_table("snapshots")
def upgrade():
    # Create the snapshots table and FK-link runs.snapshot_id to it
    # (sqlite-specific variant of this migration).
    if not has_table("snapshots"):
        op.create_table(
            "snapshots",
            sa.Column("id", sa.Integer, primary_key=True, autoincrement=True, nullable=False),
            sa.Column("snapshot_id", sa.String(255), unique=True, nullable=False),
            sa.Column("snapshot_body", sa.LargeBinary, nullable=False),
            sa.Column("snapshot_type", sa.String(63), nullable=False),
        )
    if not has_column("runs", "snapshot_id"):
        # Sqlite does not support adding foreign keys to existing
        # tables, so we are forced to fallback on this witchcraft.
        # See https://alembic.sqlalchemy.org/en/latest/batch.html#dealing-with-referencing-foreign-keys
        # for additional context
        with op.batch_alter_table("runs") as batch_op:
            # Disable FK enforcement while batch mode rebuilds the table.
            batch_op.execute("PRAGMA foreign_keys = OFF;")
            batch_op.add_column(
                sa.Column(
                    "snapshot_id",
                    sa.String(255),
                    sa.ForeignKey(
                        "snapshots.snapshot_id", name="fk_runs_snapshot_id_snapshots_snapshot_id"),
                ),
            )
        # Re-enable FK enforcement once the batch rebuild has completed.
        op.execute("PRAGMA foreign_keys = ON;")
def downgrade():
    """Drop the runs.snapshot_id column, then the snapshots table it references."""
    if has_column("runs", "snapshot_id"):
        with op.batch_alter_table("runs") as batch:
            batch.drop_column("snapshot_id")
    if has_table("snapshots"):
        op.drop_table("snapshots")
def downgrade():
    """Revert every Text-widened column across event-log, run, and job storage back to String."""
    # Tables are processed, and columns altered, in the exact order of the
    # original migration.
    text_to_string = [
        ("event_logs", ("step_key", "asset_key", "partition")),
        ("secondary_indexes", ("name",)),
        ("asset_keys", ("asset_key",)),
        ("runs", ("pipeline_name", "run_body", "partition", "partition_set")),
        ("run_tags", ("key", "value")),
        ("jobs", ("job_body",)),
        ("job_ticks", ("tick_body",)),
    ]
    for table, columns in text_to_string:
        if has_table(table):
            with op.batch_alter_table(table) as batch:
                for column in columns:
                    batch.alter_column(column, type_=sa.String, existing_type=sa.Text)
def upgrade():
    """Wipe scheduler state and drop schedule_ticks, leaving the schedules table intact."""
    inst = get_currently_upgrading_instance()
    if inst.scheduler:
        inst.scheduler.wipe(inst)
    # Intentionally do NOT drop the "schedules" table here: the 0.10.0
    # migration (create_0_10_0_schedule_ tables) checks for its presence
    # while migrating data from "schedules" into the new "jobs" table.
    if has_table("schedule_ticks"):
        op.drop_table("schedule_ticks")
def upgrade():
    """Create the snapshots table and add a snapshot_id FK column to runs."""
    # NOTE(review): unlike the guarded variant of this migration, this one
    # does not check for the runs table first — preserved as-is.
    if not has_table("snapshots"):
        op.create_table(
            "snapshots",
            sa.Column("id", sa.Integer, primary_key=True, autoincrement=True, nullable=False),
            sa.Column("snapshot_id", sa.String(255), unique=True, nullable=False),
            sa.Column("snapshot_body", sa.LargeBinary, nullable=False),
            sa.Column("snapshot_type", sa.String(63), nullable=False),
        )
    if not has_column("runs", "snapshot_id"):
        op.add_column(
            "runs",
            sa.Column("snapshot_id", sa.String(255), sa.ForeignKey("snapshots.snapshot_id")),
        )
def downgrade():
    """Drop the schedules and schedule_ticks tables if they exist."""
    for table in ("schedules", "schedule_ticks"):
        if has_table(table):
            op.drop_table(table)
def downgrade():
    """Remove the legacy schedule storage tables."""
    if has_table("schedules"):
        op.drop_table("schedules")
    if has_table("schedule_ticks"):
        op.drop_table("schedule_ticks")
def downgrade():
    """Drop runs.snapshot_id directly, then drop the snapshots table."""
    if has_column("runs", "snapshot_id"):
        # Non-batch drop — presumably targets backends where
        # ALTER TABLE ... DROP COLUMN works directly.
        op.drop_column("runs", "snapshot_id")
    if has_table("snapshots"):
        op.drop_table("snapshots")