def upgrade():
    """Rename the 'completed' task status to 'closed'."""
    tasks = table('tasks', column('status', sa.String))
    stmt = (
        tasks.update()
        .where(tasks.c.status == op.inline_literal('completed'))
        .values({'status': op.inline_literal('closed')})
    )
    op.execute(stmt)
def upgrade():
    """Add Romanian name columns to two lookup tables and backfill from DATA."""
    op.add_column('lu_population_number',
                  sa.Column('name_ro', sa.UnicodeText, nullable=True))
    op.add_column('lu_population_units_restricted',
                  sa.Column('name_ro', sa.UnicodeText, nullable=True))
    targets = [
        table('lu_population_number',
              column('code', sa.String),
              column('name_ro', sa.UnicodeText)),
        table('lu_population_units_restricted',
              column('code', sa.String),
              column('name_ro', sa.UnicodeText)),
    ]
    # Both tables share the same code -> name_ro mapping.
    for code, name_ro in DATA:
        for tbl in targets:
            op.execute(
                tbl.update()
                .where(tbl.c.code == op.inline_literal(code))
                .values({'name_ro': op.inline_literal(name_ro)}))
def upgrade():
    """Create the 'Inoket' framework, insert its lots, and link them."""
    op.execute(
        frameworks.insert()
        .values({
            'name': op.inline_literal('Inoket Framework'),
            'framework': op.inline_literal('inoket-1'),
            'slug': op.inline_literal('inoket-1'),
            'status': op.inline_literal('open'),
            'clarification_questions_open': op.inline_literal(False),
        }))
    lot_table = table(
        'lots',
        column('name', String),
        column('slug', String),
        column('one_service_limit', Boolean),
    )
    op.bulk_insert(lot_table, [
        {'name': 'Supply Teachers',
         'slug': 'supply_teachers',
         'one_service_limit': False},
    ])
    conn = op.get_bind()
    framework = list(
        conn.execute(
            "SELECT id FROM frameworks WHERE slug = 'inoket-1'").fetchall())
    # Add more lots like this when necessary, just supply teachers for now
    lots = list(
        conn.execute(
            "SELECT id FROM lots WHERE slug in ('supply_teachers')").fetchall())
    if len(framework) == 0:
        raise Exception("Framework not found")
    # Ids come straight from the database, so plain formatting is safe here.
    for lot in lots:
        op.execute(
            "INSERT INTO framework_lots (framework_id, lot_id) "
            "VALUES({}, {})".format(framework[0]["id"], lot["id"]))
def upgrade():
    """Add a check-constrained 'status' column to both services tables and
    mark every existing row as 'enabled'."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('archived_services',
                  sa.Column('status', sa.String(), nullable=True))
    op.add_column('services',
                  sa.Column('status', sa.String(), nullable=True))
    op.create_check_constraint(
        "ck_services_status",
        "services",
        "status in ('disabled', 'enabled', 'published')"
    )
    op.create_check_constraint(
        "ck_archived_services_status",
        "archived_services",
        "status in ('disabled', 'enabled', 'published')"
    )
    # Backfill: every pre-existing row starts out 'enabled'.
    for name in ('services', 'archived_services'):
        tbl = table(name, column('status', String))
        op.execute(
            tbl.update().values({'status': op.inline_literal('enabled')}))
def upgrade():
    """Hash each user's plaintext password with bcrypt into bcrypt_pwd."""
    op.execute('SET search_path TO mineturer')
    conn = op.get_bind()
    rows = conn.execute('select userid, password from users').fetchall()
    users = table(
        'users',
        column('bcrypt_pwd', sa.String),
        column('userid', sa.Integer)
    )
    for userid, pwd in rows:
        # A fresh salt per user; hashpw returns the salted hash.
        hashed = bcrypt.hashpw(pwd.encode('utf-8'), bcrypt.gensalt())
        op.execute(
            users.update()
            .where(users.c.userid == op.inline_literal(userid))
            .values({'bcrypt_pwd': op.inline_literal(hashed)})
        )
    op.execute('SET search_path TO public')
def upgrade_location_titles():
    """Re-slugify every location name, de-duplicating repeated ids.

    Rows are visited with active locations first (ORDER BY active DESC) so
    an active location keeps its unsuffixed title; later inactive
    duplicates get ' - <count>' appended before slugifying.
    """
    from slugify import slugify
    location_table = sa.sql.table(
        'location',
        sa.sql.column('id'),
        sa.sql.column('name'),
        sa.sql.column('title'),
        sa.sql.column('active'),
    )
    duplicates = {}
    locations_query = (
        location_table.select()
        .order_by(location_table.c.active.desc()))
    conn = op.get_bind()
    for location in conn.execute(locations_query):
        found = duplicates[location.id] = duplicates.get(location.id, 0) + 1
        # Prefer active to not have duplicate count
        if not location.active and found > 1:
            # BUG FIX: was '.formate(...)', which raised AttributeError for
            # every inactive duplicate row.
            title = '{0} - {1}'.format(location.title, found)
        else:
            title = location.title
        op.execute(
            location_table.update()
            .where(location_table.c.id == op.inline_literal(location.id))
            .values(name=op.inline_literal(slugify(title)))
        )
def upgrade():
    """Spell out full university names (and one short name) for four teams."""
    team = sa.sql.table(
        'team',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('full_name', sa.String(length=75), nullable=True),
        sa.Column('short_name', sa.String(length=50), nullable=True),
        sa.Column('flair', sa.String(length=50), nullable=True),
        sa.Column('nickname', sa.String(length=50), nullable=True),
        sa.Column('conference', sa.String(length=50), nullable=True))
    renames = [
        (107, {'full_name': op.inline_literal('University of Wisconsin-Green Bay'),
               'short_name': op.inline_literal('Green Bay')}),
        (168, {'full_name': op.inline_literal('University of Wisconsin-Milwaukee')}),
        (317, {'full_name': op.inline_literal('University of Wisconsin-Madison')}),
        (111, {'full_name': op.inline_literal('University of Hawaii at Manoa')}),
    ]
    for team_id, values in renames:
        op.execute(team.update().where(team.c.id == team_id).values(values))
def upgrade():
    """Add a human-readable 'label' column to tab_data and populate it."""
    add_column(
        'tab_data',
        Column('label', String),
    )
    tab = table(
        'tab_data',
        Column('name', String),
        Column('label', String),
    )
    # name -> display label (insertion order preserved).
    labels = {
        'loga': 'Loga',
        'scores': 'Wszystkie Mecze',
        'group_a': 'Grupa A',
        'group_b': 'Grupa B',
        'finals': 'FinaĆy',
    }
    for name, label in labels.items():
        execute(
            tab.update()
            .where(tab.c.name == inline_literal(name))
            .values({'label': inline_literal(label)})
        )
def downgrade():
    """Remove the default meeting type, its meeting, and its title translation."""
    conn = op.get_bind()
    # Look up the slug of the meeting type flagged as default.
    [default_meeting_type] = conn.execute(
        meeting_type_table.select()
        .where(meeting_type_table.c.default == op.inline_literal(True))
        .with_only_columns(['slug'])
    ).fetchone()
    # Find the translated title id of the meeting using that type.
    [title_id] = conn.execute(
        meeting_table.select()
        .where(meeting_table.c.meeting_type ==
               op.inline_literal(default_meeting_type))
        .with_only_columns(['title_id'])
    ).fetchone()
    # Delete meeting, then its translation, then the meeting type itself.
    conn.execute(
        meeting_table.delete()
        .where(meeting_table.c.meeting_type ==
               op.inline_literal(default_meeting_type)))
    conn.execute(
        translation_table.delete()
        .where(translation_table.c.id == op.inline_literal(title_id)))
    conn.execute(
        meeting_type_table.delete()
        .where(meeting_type_table.c.slug ==
               op.inline_literal(default_meeting_type)))
def downgrade(engine_name):
    """Restore the original 'Drought' title for the 'DG' hazard type."""
    hazardtype = HazardType.__table__
    op.execute(
        hazardtype.update()
        .where(hazardtype.c.mnemonic == op.inline_literal('DG'))
        .values({'title': op.inline_literal('Drought')}))
    # Removed a dead trailing `pass` statement — the function body is
    # non-empty, so it served no purpose.
def downgrade():
    """Re-register migration version 57 for the MediaCore repository."""
    stmt = migrate_table.insert().values({
        'repository_id': inline_literal(u'MediaCore Migrations'),
        'version': inline_literal(57),
    })
    execute(stmt)
def downgrade():
    """Delete the Reader/ObjectEditor/ProgramCreator roles (FK checks off)."""
    op.execute('SET FOREIGN_KEY_CHECKS = 0')
    doomed = [op.inline_literal(name)
              for name in ('Reader', 'ObjectEditor', 'ProgramCreator')]
    op.execute(roles_table.delete().where(roles_table.c.name.in_(doomed)))
def insert_permission(permission_name, description):
    """Insert one row into the permissions table."""
    row = {
        'permission_name': inline_literal(permission_name),
        'description': inline_literal(description),
    }
    execute(permissions.insert().values(row))
def add_group(group_name, display_name):
    """Insert one row into the groups table."""
    row = {
        'group_name': inline_literal(group_name),
        'display_name': inline_literal(display_name),
    }
    execute(groups.insert().values(row))
def insert_setting(key, value):
    """Insert one key/value row into the settings table."""
    row = {
        'key': inline_literal(key),
        'value': inline_literal(value),
    }
    execute(settings.insert().values(row))
def downgrade():
    """Drop the Program*/RoleReader roles and restore target_context_id."""
    op.execute('SET FOREIGN_KEY_CHECKS = 0')
    doomed = [op.inline_literal(name) for name in
              ('ProgramOwner', 'ProgramEditor', 'ProgramReader', 'RoleReader')]
    op.execute(roles_table.delete().where(roles_table.c.name.in_(doomed)))
    op.add_column('users_roles', sa.Column('target_context_id', sa.Integer))
def upgrade():
    """Add county.code and backfill it from the CODES (code, name) pairs."""
    op.add_column('county', sa.Column('code', sa.Text(), nullable=True))
    county = table('county', column('code', sa.Text), column('name', sa.Text))
    for code, name in CODES:
        stmt = (
            county.update()
            .where(county.c.name == op.inline_literal(name))
            .values({'code': op.inline_literal(code)})
        )
        op.execute(stmt)
def downgrade():
    """Replace the zygosity enum with an integer allele count."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('observation',
                  sa.Column(u'alleles', sa.INTEGER(), nullable=True))
    # https://alembic.readthedocs.org/en/latest/ops.html#alembic.operations.Operations.execute
    observation = sql.table(
        'observation',
        sql.column('alleles', sa.INTEGER()),
        sql.column('zygosity',
                   sa.Enum('heterozygous', 'homozygous', name='zygosity')))
    # heterozygous -> 1 allele, homozygous -> 2 alleles.
    for zygosity, count in (('heterozygous', 1), ('homozygous', 2)):
        op.execute(
            observation.update()
            .where(observation.c.zygosity == op.inline_literal(zygosity))
            .values({'alleles': count}))
    op.drop_column('observation', 'zygosity')
def downgrade():
    """Map direction constants back to 'true'/'false', then retype as boolean."""
    for constant, literal in ((DIR_A, u'true'), (DIR_B, u'false')):
        op.execute(
            group.update()
            .where(group.c.direction == constant)
            .values({'direction': op.inline_literal(literal)}))
    op.execute('ALTER TABLE "group" '
               'ALTER direction TYPE boolean '
               'USING (direction::boolean)')
def upgrade():
    """Retype group.direction to unicode and map boolean strings to constants."""
    op.alter_column(
        table_name='group',
        column_name='direction',
        type_=sa.Unicode())
    for literal, constant in (('true', DIR_A), ('false', DIR_B)):
        op.execute(
            group.update()
            .where(group.c.direction == literal)
            .values({'direction': op.inline_literal(constant)}))
def upgrade():
    """Add lu_measures.name_ro and backfill it from DATA."""
    op.add_column('lu_measures',
                  sa.Column('name_ro', sa.UnicodeText, nullable=True))
    measures = table('lu_measures',
                     column('code', sa.String),
                     column('name_ro', sa.UnicodeText))
    for code, name_ro in DATA:
        op.execute(
            measures.update()
            .where(measures.c.code == op.inline_literal(code))
            .values({'name_ro': op.inline_literal(name_ro)}))
def downgrade():
    """Delete the REPORTING_BEGIN/REPORTING_END rows from config."""
    config = sa.sql.table(
        'config',
        sa.sql.column('objectid'),
        sa.sql.column('value'),
    )
    is_reporting_bound = (
        (config.c.objectid == op.inline_literal('REPORTING_BEGIN'))
        | (config.c.objectid == op.inline_literal('REPORTING_END'))
    )
    op.execute(config.delete().where(is_reporting_bound))
def upgrade():
    """Migrate task/subtask status values to the new status scheme.

    Adds a temporary 'new_status' column to both tables, translates the old
    statuses into it, then drops the old 'status' column and renames
    'new_status' back — recreating the status indexes around the swap.
    """
    # Workaround for Alemic bug #89
    # https://bitbucket.org/zzzeek/alembic/issue/89
    with op.batch_alter_table("tasks") as batch_op:
        batch_op.add_column(
            sa.Column("new_status", sa.String(36),
                      default=consts.TaskStatus.INIT))
    with op.batch_alter_table("subtasks") as batch_op:
        batch_op.add_column(
            sa.Column("new_status", sa.String(36),
                      default=consts.SubtaskStatus.RUNNING))
    # Statuses listed in WITHOUT_CHANGES keep their value verbatim.
    op.execute(
        task.update()
        .where(task.c.status.in_(WITHOUT_CHANGES))
        .values({"new_status": task.c.status}))
    # Renamed statuses are translated pair by pair.
    for old, new in OLD_TO_NEW:
        op.execute(
            task.update()
            .where(task.c.status == op.inline_literal(old))
            .values({"new_status": new}))
    # NOTE(rvasilets): Assume that set_failed was used only in causes of
    # validation failed
    op.execute(
        task.update().where(
            (task.c.status == op.inline_literal("failed"))
            & (task.c.validation_result == {})).values(
                {"new_status": "crashed", "validation_result": {}}))
    op.execute(
        task.update().where(
            (task.c.status == op.inline_literal("failed"))
            & (task.c.validation_result != {})).values(
                {"new_status": "validation_failed",
                 "validation_result": task.c.validation_result}))
    # Indexes must go before the column swap and be recreated afterwards.
    op.drop_index("task_status", "tasks")
    op.drop_index("subtask_status", "subtasks")
    # NOTE(boris-42): Statuses "setting up", "cleaning up" were not used
    with op.batch_alter_table("tasks") as batch_op:
        batch_op.drop_column("status")
        batch_op.alter_column("new_status", new_column_name="status",
                              existing_type=sa.String(36))
    with op.batch_alter_table("subtasks") as batch_op:
        batch_op.drop_column("status")
        batch_op.alter_column("new_status", new_column_name="status",
                              existing_type=sa.String(36))
    op.create_index("task_status", "tasks", ["status"])
    op.create_index("subtask_status", "subtasks", ["status"])
def upgrade():
    """Create build_chroot and populate it from build.chroots, then drop the
    migrated columns from build."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table("build_chroot",
                    sa.Column("mock_chroot_id", sa.Integer(), nullable=False),
                    sa.Column("build_id", sa.Integer(), nullable=False),
                    sa.Column("status", sa.Integer(), nullable=True),
                    sa.ForeignKeyConstraint(["build_id"], ["build.id"], ),
                    sa.ForeignKeyConstraint(
                        ["mock_chroot_id"], ["mock_chroot.id"], ),
                    sa.PrimaryKeyConstraint("mock_chroot_id", "build_id")
                    )
    # transfer data from build table to build_chroot
    metadata = sa.MetaData()
    # just what we need of copr table
    build_table = sa.Table("build", metadata,
                           sa.Column("chroots", sa.Text()),
                           sa.Column("status", sa.Integer()),
                           sa.Column("id", sa.Integer()),
                           )
    mc_table = sa.Table("mock_chroot", metadata,
                        sa.Column("id", sa.Integer(), nullable=False),
                        sa.Column(
                            "os_release", sa.String(length=50),
                            nullable=False),
                        sa.Column(
                            "os_version", sa.String(length=50),
                            nullable=False),
                        sa.Column(
                            "arch", sa.String(length=50), nullable=False),
                        sa.Column("is_active", sa.Boolean(), nullable=False),
                        )
    bc_table = sa.Table("build_chroot", metadata,
                        sa.Column(
                            "mock_chroot_id", sa.Integer(), nullable=False),
                        sa.Column("build_id", sa.Integer(), nullable=False),
                        sa.Column("status", sa.Integer(), nullable=True),
                        )
    # build.chroots is a space-separated list of "<os>-<version>-<arch>"
    # strings; resolve each one to its mock_chroot id and copy the build's
    # status into the new association table.
    for row in op.get_bind().execute(sa.select([build_table.c.id,
                                                build_table.c.chroots,
                                                build_table.c.status])):
        for c in row[1].split(" "):
            chroot_array = c.split("-")
            for row2 in (op.get_bind().execute(sa.select(
                    [mc_table.c.id],
                    sa.and_(
                        mc_table.c.os_release == op.inline_literal(
                            chroot_array[0]),
                        mc_table.c.os_version == op.inline_literal(
                            chroot_array[1]),
                        mc_table.c.arch == op.inline_literal(
                            chroot_array[2]),
                    )))):
                # should be just one row
                op.bulk_insert(
                    bc_table,
                    [{"mock_chroot_id": row2[0],
                      "build_id": row[0],
                      "status": row[2]}])
    # drop old columns
    op.drop_column(u"build", u"status")
    op.drop_column(u"build", u"chroots")
def upgrade():
    """Add audit columns to services and stamp existing rows as imported."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('services',
                  sa.Column('updated_by', sa.String(), nullable=True))
    op.add_column('services',
                  sa.Column('updated_reason', sa.String(), nullable=True))
    services = table('services',
                     column('updated_by', String),
                     column('updated_reason', String))
    op.execute(
        services.update().values({
            'updated_by': op.inline_literal('import'),
            'updated_reason': op.inline_literal('initial import'),
        }))
def upgrade():
    """Classify accommodation types as 'chambre' or 'gites' by their codes."""
    op.add_column('type_heb', sa.Column('type_heb_type', sa.String()))
    type_heb = table('type_heb',
                     column('type_heb_code', sa.String),
                     column('type_heb_type', sa.String))
    CHAMBRES = ['CH', 'MH', 'CHECR']
    GITES = ['GR', 'GF', 'MT', 'GC', 'MV', 'GRECR', 'GG']
    for codes, kind in ((CHAMBRES, 'chambre'), (GITES, 'gites')):
        op.execute(
            type_heb.update()
            .where(type_heb.c.type_heb_code.in_(codes))
            .values({'type_heb_type': op.inline_literal(kind)}))
def test_auto_literals(self):
    """inline_literal values and bindparams render as inline SQL literals."""
    context = op_fixture(as_sql=True, literal_binds=True)
    from sqlalchemy.sql import table, column
    from sqlalchemy import String, Integer
    account = table("account",
                    column("name", String),
                    column("id", Integer))
    stmt = (
        account.update()
        .where(account.c.name == op.inline_literal("account 1"))
        .values({"name": op.inline_literal("account 2")})
    )
    op.execute(stmt)
    op.execute(text("update table set foo=:bar").bindparams(bar="bat"))
    context.assert_(
        "UPDATE account SET name='account 2' "
        "WHERE account.name = 'account 1'",
        "update table set foo='bat'"
    )
def upgrade():
    """Shorten a handful of facility-type names."""
    account = table('facility_types', column('name', String))
    # (old name, new name) pairs; old values reproduced exactly as stored.
    changes = [
        (u'Isidrettshall (ishall/skĂžytebane)', u'Ishall'),
        (u'UtendĂžrs friidrettsanlegg', u'Friidrettsanlegg'),
        (u'UtendĂžrs skianlegg', u'Skianlegg'),
    ]
    for old, new in changes:
        op.execute(
            account.update()
            .where(account.c.name == op.inline_literal(old))
            .values({'name': op.inline_literal(new)}))
def upgrade_site_locations():
    """Add location.is_enabled and pre-enable site-specific location ids."""
    op.add_column('location', sa.Column(
        'is_enabled',
        sa.Boolean(),
        nullable=False,
        server_default=sa.sql.false()
    ))
    location_table = sa.sql.table(
        'location',
        sa.sql.column('id'),
        sa.sql.column('name'),
        sa.sql.column('is_enabled'),
    )
    url = context.config.get_main_option('sqlalchemy.url')
    # ID's are more reliable in this case....
    # NOTE(review): 73 appears twice in the cctg list — harmless inside an
    # IN() clause, but confirm it wasn't meant to be a different id.
    if 'cctg' in url:
        pre_enabled = [1, 57, 73, 73, 75, 76]
    elif 'mhealth' in url:
        pre_enabled = [1]
    elif 'aeh' in url:
        pre_enabled = [56, 57]
    else:
        pre_enabled = []
    enabled_literals = [op.inline_literal(i) for i in pre_enabled]
    if enabled_literals:
        op.execute(
            location_table.update()
            .where(location_table.c.id.in_(enabled_literals))
            .values(is_enabled=sa.sql.true()))
def upgrade():
    """Add users.course_id (backfilled to 'sea401d2'); 'cewing' gets 'all'."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        'users',
        sa.Column('course_id', sa.Unicode(length=32), nullable=False,
                  server_default=u'sea401d2'))
    # The server default only exists to backfill existing rows; drop it.
    op.alter_column('users', 'course_id', server_default=None)
    op.execute(
        fake_users.update()
        .where(fake_users.c.username == op.inline_literal(u'cewing'))
        .values({'course_id': op.inline_literal(u'all')}))
def downgrade():
    """Drop the indexes/columns/constraints added by this revision.

    Index drops are issued in the exact original order: first the composite
    indexes named literally, then the conventionally-named ones via op.f().
    """
    # Composite indexes with literal (non-convention) names.
    for idx, tbl in [
        ('tasks_graphs__execution_fk_name_visibility_idx', 'tasks_graphs'),
        ('node_instances_state_visibility_idx', 'node_instances'),
        ('logs_node_id_visibility_execution_fk_idx', 'logs'),
        ('executions_dep_fk_isw_vis_tenant_id_idx', 'executions'),
        ('events_node_id_visibility_idx', 'events'),
        ('deployments__sife_fk_visibility_idx', 'deployments'),
    ]:
        op.drop_index(idx, table_name=tbl)
    # Convention-named indexes, dropped via op.f().
    for idx, tbl in [
        ('tasks_graphs_name_idx', 'tasks_graphs'),
        ('node_instances_state_idx', 'node_instances'),
        ('logs_node_id_idx', 'logs'),
        ('executions_is_system_workflow_idx', 'executions'),
        ('events_node_id_idx', 'events'),
        ('users_tenants_user_id_idx', 'users_tenants'),
        ('users_tenants_tenant_id_idx', 'users_tenants'),
        ('users_tenants_role_id_idx', 'users_tenants'),
        ('tasks_graphs_visibility_idx', 'tasks_graphs'),
        ('tasks_graphs__execution_fk_idx', 'tasks_graphs'),
        ('tasks_graphs__creator_id_idx', 'tasks_graphs'),
        ('snapshots_visibility_idx', 'snapshots'),
        ('snapshots__creator_id_idx', 'snapshots'),
        ('sites_visibility_idx', 'sites'),
        ('sites__creator_id_idx', 'sites'),
        ('secrets_visibility_idx', 'secrets'),
        ('secrets__creator_id_idx', 'secrets'),
        ('rabbitmq_brokers__ca_cert_id_idx', 'rabbitmq_brokers'),
        ('plugins_updates_visibility_idx', 'plugins_updates'),
        ('plugins_updates__temp_blueprint_fk_idx', 'plugins_updates'),
        ('plugins_updates__original_blueprint_fk_idx', 'plugins_updates'),
        ('plugins_updates__execution_fk_idx', 'plugins_updates'),
        ('plugins_updates__creator_id_idx', 'plugins_updates'),
        ('plugins_visibility_idx', 'plugins'),
        ('plugins__creator_id_idx', 'plugins'),
        ('operations_visibility_idx', 'operations'),
        ('operations__tasks_graph_fk_idx', 'operations'),
        ('operations__creator_id_idx', 'operations'),
        ('nodes_visibility_idx', 'nodes'),
        ('nodes__creator_id_idx', 'nodes'),
        ('node_instances_visibility_idx', 'node_instances'),
        ('node_instances__creator_id_idx', 'node_instances'),
        ('managers__ca_cert_id_idx', 'managers'),
        ('logs_visibility_idx', 'logs'),
        ('logs__creator_id_idx', 'logs'),
        ('groups_tenants_tenant_id_idx', 'groups_tenants'),
        ('groups_tenants_role_id_idx', 'groups_tenants'),
        ('groups_tenants_group_id_idx', 'groups_tenants'),
        ('executions_visibility_idx', 'executions'),
        ('executions__deployment_fk_idx', 'executions'),
        ('executions__creator_id_idx', 'executions'),
        ('events_visibility_idx', 'events'),
        ('events__creator_id_idx', 'events'),
        ('deployments_visibility_idx', 'deployments'),
        ('deployments__site_fk_idx', 'deployments'),
        ('deployments__creator_id_idx', 'deployments'),
        ('deployments__blueprint_fk_idx', 'deployments'),
        ('deployment_updates_visibility_idx', 'deployment_updates'),
        ('deployment_updates__old_blueprint_fk_idx', 'deployment_updates'),
        ('deployment_updates__new_blueprint_fk_idx', 'deployment_updates'),
        ('deployment_updates__execution_fk_idx', 'deployment_updates'),
        ('deployment_updates__deployment_fk_idx', 'deployment_updates'),
        ('deployment_updates__creator_id_idx', 'deployment_updates'),
        ('deployment_update_steps_visibility_idx', 'deployment_update_steps'),
        ('deployment_update_steps__deployment_update_fk_idx',
         'deployment_update_steps'),
        ('deployment_update_steps__creator_id_idx', 'deployment_update_steps'),
        ('deployment_modifications_visibility_idx', 'deployment_modifications'),
        ('deployment_modifications__deployment_fk_idx',
         'deployment_modifications'),
        ('deployment_modifications__creator_id_idx',
         'deployment_modifications'),
        ('config__updater_id_idx', 'config'),
        ('certificates__updater_id_idx', 'certificates'),
        ('blueprints_visibility_idx', 'blueprints'),
        ('blueprints__creator_id_idx', 'blueprints'),
        ('agents_visibility_idx', 'agents'),
        ('agents__node_instance_fk_idx', 'agents'),
        ('agents__creator_id_idx', 'agents'),
        ('executions_token_idx', 'executions'),
        ('executions_ended_at_idx', 'executions'),
        ('nodes__deployment_fk_idx', 'nodes'),
        ('node_instances__node_fk_idx', 'node_instances'),
    ]:
        op.drop_index(op.f(idx), table_name=tbl)
    # Columns added by this revision.
    op.drop_column('deployment_updates', 'runtime_only_evaluation')
    op.drop_column('deployments', 'runtime_only_evaluation')
    op.drop_column('executions', 'blueprint_id')
    op.drop_column('node_instances', 'index')
    # Remove the rest-scoped ldap_ca_path config entry.
    op.execute(config_table.delete().where(
        (config_table.c.name == op.inline_literal('ldap_ca_path'))
        & (config_table.c.scope == op.inline_literal('rest'))))
    op.drop_constraint(op.f('rabbitmq_brokers_node_id_key'),
                       'rabbitmq_brokers', type_='unique')
    op.drop_column('rabbitmq_brokers', 'node_id')
    op.drop_column('rabbitmq_brokers', 'is_external')
    op.drop_constraint(op.f('managers_node_id_key'), 'managers',
                       type_='unique')
    op.drop_column('managers', 'node_id')
    op.drop_index(op.f('managers_last_seen_idx'), table_name='managers')
    op.drop_column('managers', 'last_seen')
    op.drop_column('managers', 'status_report_frequency')
    op.drop_table('db_nodes')
    op.drop_constraint(
        'users_roles_pkey',
        'users_roles',
    )
def downgrade():
    """Blank out the English name on every person row."""
    stmt = person_t.update().values({'name_en': op.inline_literal('')})
    op.execute(stmt)
def upgrade():
    """Register the G-Cloud 7 framework in the pending state."""
    row = {
        'name': op.inline_literal('G-Cloud 7'),
        'framework': op.inline_literal('gcloud'),
        'status': op.inline_literal('pending'),
    }
    op.execute(frameworks.insert().values(row))
def upgrade():
    """Switch the FMC event to the 'Bo3' format."""
    events = table('events',
                   column('name', String),
                   column('eventFormat', String))
    stmt = (
        events.update()
        .where(events.c.name == op.inline_literal('FMC'))
        .values({'eventFormat': op.inline_literal('Bo3')})
    )
    op.execute(stmt)
def upgrade():
    """Rename the DAG default view 'tree' to 'grid'."""
    stmt = (
        dag.update()
        .where(dag.c.default_view == op.inline_literal('tree'))
        .values({'default_view': op.inline_literal('grid')})
    )
    op.execute(stmt)
def delete_setting(key):
    """Delete the settings row whose key matches *key*."""
    stmt = settings.delete().where(settings.c.key == inline_literal(key))
    execute(stmt)
def downgrade():
    """Downgrade database schema and/or data back to the previous revision."""
    # migrate notification_types in notifications with types being deleted
    # day_ -> month_
    delete_types_list = [
        "day_cycle_task_due_in",
        "day_workflow_starts_in",
    ]
    cycle_types_list = [
        "quarterly_cycle_task_due_in",
        "annually_cycle_task_due_in",
    ]
    workflow_types_list = [
        "quarterly_workflow_starts_in",
        "annually_workflow_starts_in",
    ]
    # Re-point notifications that reference quarterly/annually cycle types
    # at the month_ cycle type, resolved via a correlated subselect on name.
    op.execute(notifications.update().where(
        notifications.c.notification_type_id.in_(
            sa.sql.select([notification_types.c.id]).where(
                notification_types.c.name.in_(cycle_types_list)))).values({
                    "notification_type_id": sa.sql.select([
                        notification_types.c.id
                    ]).where(notification_types.c.name == op.inline_literal(
                        'month_cycle_task_due_in')),
                }))
    # Same re-pointing for the workflow-start notification types.
    op.execute(notifications.update().where(
        notifications.c.notification_type_id.in_(
            sa.sql.select([notification_types.c.id]).where(
                notification_types.c.name.in_(workflow_types_list)))).values({
                    "notification_type_id": sa.sql.select([
                        notification_types.c.id
                    ]).where(notification_types.c.name == op.inline_literal(
                        'month_workflow_starts_in')),
                }))
    # then delete day_ notification_types
    op.execute(notification_types.delete().where(
        notification_types.c.name.in_(delete_types_list)))
    # modify weekly, monthly notification_types to week, month
    op.execute(notification_types.update().where(
        notification_types.c.name == op.inline_literal(
            'week_cycle_task_due_in')).values(
            {'name': op.inline_literal('weekly_cycle_task_due_in')}))
    op.execute(notification_types.update().where(
        notification_types.c.name == op.inline_literal(
            'month_cycle_task_due_in')).values(
            {'name': op.inline_literal('monthly_cycle_task_due_in')}))
    op.execute(notification_types.update().where(
        notification_types.c.name == op.inline_literal(
            'week_workflow_starts_in')).values({
                'name': op.inline_literal('weekly_workflow_starts_in'),
                'template': op.inline_literal('weekly_workflow_starts_in')
            }))
    op.execute(notification_types.update().where(
        notification_types.c.name == op.inline_literal(
            'month_workflow_starts_in')).values({
                'name': op.inline_literal('monthly_workflow_starts_in'),
                'template': op.inline_literal('monthly_workflow_starts_in')
            }))
    # migrate notification_types in notifications with types being deleted
    # we can not recover data precisely after once upgraded, because
    # users could create workflows which do not correspond to old types
    # month_ -> monthly_
    cycle_types_list = [
        "month_cycle_task_due_in",
    ]
    workflow_types_list = [
        "month_workflow_starts_in",
    ]
    op.execute(notifications.update().where(
        notifications.c.notification_type_id.in_(
            sa.sql.select([notification_types.c.id]).where(
                notification_types.c.name.in_(cycle_types_list)))).values({
                    "notification_type_id": sa.sql.select([
                        notification_types.c.id
                    ]).where(notification_types.c.name == op.inline_literal(
                        'monthly_cycle_task_due_in')),
                }))
    op.execute(notifications.update().where(
        notifications.c.notification_type_id.in_(
            sa.sql.select([notification_types.c.id]).where(
                notification_types.c.name.in_(workflow_types_list)))).values({
                    "notification_type_id": sa.sql.select([
                        notification_types.c.id
                    ]).where(notification_types.c.name == op.inline_literal(
                        'monthly_workflow_starts_in')),
                }))
    # delete deprecated notification_types
    op.execute(notification_types.delete().where(
        notification_types.c.name.in_(cycle_types_list + workflow_types_list)))
"instant": False, }, { # workflow starts in notification "name": "day_workflow_starts_in", "description": "Advanced notification for a recurring workflow.", "template": "day_workflow_starts_in", "advance_notice": 1, "instant": False, }, ]) # modify weekly, monthly notification_types to week, month op.execute(notification_types.update().where( notification_types.c.name == op.inline_literal( 'weekly_cycle_task_due_in')).values( {'name': op.inline_literal('week_cycle_task_due_in')})) op.execute(notification_types.update().where( notification_types.c.name == op.inline_literal( 'monthly_cycle_task_due_in')).values( {'name': op.inline_literal('month_cycle_task_due_in')})) op.execute(notification_types.update().where( notification_types.c.name == op.inline_literal( 'weekly_workflow_starts_in')).values({ 'name': op.inline_literal('week_workflow_starts_in'), 'template': op.inline_literal('week_workflow_starts_in') })) op.execute(notification_types.update().where( notification_types.c.name == op.inline_literal(
def upgrade():
    """Add an 'order' column to shopping_category, insert new categories,
    and assign a display order (plus limit tweaks) to existing ones;
    'Coats' and 'Other' are removed."""
    op.add_column('shopping_category', sa.Column('order', sa.Integer))
    shopping_categories = table(
        'shopping_category',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('name', sa.Unicode(75), nullable=False),
        sa.Column('daily_limit', sa.Integer, nullable=True),
        sa.Column('monthly_limit', sa.Integer, nullable=False),
        sa.Column('family_wide', sa.Boolean, nullable=False),
        sa.Column('order', sa.Integer, nullable=False)
    )
    op.bulk_insert(shopping_categories, [
        {'id': 7, 'name': 'Accessories', 'daily_limit': 2,
         'monthly_limit': 8, 'family_wide': False, 'order': 3},
        {'id': 8, 'name': 'Socks/Underwear', 'daily_limit': 1,
         'monthly_limit': 4, 'family_wide': False, 'order': 5},
        {'id': 9, 'name': 'Toys', 'daily_limit': 1,
         'monthly_limit': 4, 'family_wide': False, 'order': 6},
        {'id': 10, 'name': 'Books', 'daily_limit': 2,
         'monthly_limit': 8, 'family_wide': False, 'order': 7},
        {'id': 11, 'name': 'Seasonal', 'daily_limit': 1,
         'monthly_limit': 4, 'family_wide': False, 'order': 9},
    ])
    # BUG FIX: the integer columns were previously updated with string
    # literals (op.inline_literal('1'), '2', ...); use real ints so the
    # rendered SQL matches the declared column types.
    updates = [
        ('Clothing', {'order': 1}),
        ('Household', {'order': 2, 'daily_limit': 2, 'monthly_limit': 2}),
        ('Shoes', {'order': 4, 'daily_limit': 1, 'monthly_limit': 4}),
        ('Baby', {'order': 8, 'daily_limit': 1, 'monthly_limit': 4}),
    ]
    for name, values in updates:
        op.execute(
            shopping_categories.update()
            .where(shopping_categories.c.name == op.inline_literal(name))
            .values({k: op.inline_literal(v) for k, v in values.items()}))
    for name in ('Coats', 'Other'):
        op.execute(
            shopping_categories.delete()
            .where(shopping_categories.c.name == op.inline_literal(name)))
def upgrade(active_plugins=None, options=None):
    """Copy rows from the old OFC ID tables (ofctenants, ofcnetworks,
    ofcports, ofcfilters) into the corresponding *mappings tables,
    building path-style ofc_ids ('/tenants/<id>', '<tenant>/networks/<id>',
    ...), then drop the old tables.
    """
    # Only run when this migration applies to one of the active plugins.
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    # Table definitions below are only used for sqlalchemy to generate
    # SQL statements, so in networks/ports tables only required field
    # are declared. Note that 'quantum_id' in OFC ID mapping tables
    # will be renamed in a later patch (bug 1287432).
    ofctenants = sa_expr.table('ofctenants',
                               sa_expr.column('id'),
                               sa_expr.column('quantum_id'))
    ofcnetworks = sa_expr.table('ofcnetworks',
                                sa_expr.column('id'),
                                sa_expr.column('quantum_id'))
    ofcports = sa_expr.table('ofcports',
                             sa_expr.column('id'),
                             sa_expr.column('quantum_id'))
    ofcfilters = sa_expr.table('ofcfilters',
                               sa_expr.column('id'),
                               sa_expr.column('quantum_id'))
    ofctenantmappings = sa_expr.table('ofctenantmappings',
                                      sa_expr.column('ofc_id'),
                                      sa_expr.column('quantum_id'))
    ofcnetworkmappings = sa_expr.table('ofcnetworkmappings',
                                       sa_expr.column('ofc_id'),
                                       sa_expr.column('quantum_id'))
    ofcportmappings = sa_expr.table('ofcportmappings',
                                    sa_expr.column('ofc_id'),
                                    sa_expr.column('quantum_id'))
    ofcfiltermappings = sa_expr.table('ofcfiltermappings',
                                      sa_expr.column('ofc_id'),
                                      sa_expr.column('quantum_id'))
    networks = sa_expr.table('networks',
                             sa_expr.column('id'),
                             sa_expr.column('tenant_id'))
    ports = sa_expr.table('ports',
                          sa_expr.column('id'),
                          sa_expr.column('network_id'))

    # ofctenants -> ofctenantmappings
    # New ofc_id is the path '/tenants/<old id>'.
    select_obj = sa.select([ofctenants.c.quantum_id,
                            op.inline_literal('/tenants/') + ofctenants.c.id])
    stmt = InsertFromSelect([ofctenantmappings.c.quantum_id,
                             ofctenantmappings.c.ofc_id],
                            select_obj)
    op.execute(stmt)

    # ofcnetworks -> ofcnetworkmappings
    # Join through networks to find the owning tenant's mapping so the
    # new ofc_id is '<tenant ofc_id>/networks/<old id>'.
    select_obj = ofcnetworks.join(
        networks,
        ofcnetworks.c.quantum_id == networks.c.id)
    select_obj = select_obj.join(
        ofctenantmappings,
        ofctenantmappings.c.quantum_id == networks.c.tenant_id)
    select_obj = sa.select([ofcnetworks.c.quantum_id,
                            (ofctenantmappings.c.ofc_id +
                             op.inline_literal('/networks/') +
                             ofcnetworks.c.id)],
                           from_obj=select_obj)
    stmt = InsertFromSelect([ofcnetworkmappings.c.quantum_id,
                             ofcnetworkmappings.c.ofc_id],
                            select_obj)
    op.execute(stmt)

    # ofcports -> ofcportmappings
    # Same pattern, one level down: '<network ofc_id>/ports/<old id>'.
    select_obj = ofcports.join(
        ports,
        ofcports.c.quantum_id == ports.c.id)
    select_obj = select_obj.join(
        ofcnetworkmappings,
        ofcnetworkmappings.c.quantum_id == ports.c.network_id)
    select_obj = sa.select([ofcports.c.quantum_id,
                            (ofcnetworkmappings.c.ofc_id +
                             op.inline_literal('/ports/') +
                             ofcports.c.id)],
                           from_obj=select_obj)
    stmt = InsertFromSelect([ofcportmappings.c.quantum_id,
                             ofcportmappings.c.ofc_id],
                            select_obj)
    op.execute(stmt)

    # ofcfilters -> ofcfiltermappings
    # Filters are top-level like tenants: '/filters/<old id>'.
    select_obj = sa.select([ofcfilters.c.quantum_id,
                            op.inline_literal('/filters/') + ofcfilters.c.id])
    stmt = InsertFromSelect([ofcfiltermappings.c.quantum_id,
                             ofcfiltermappings.c.ofc_id],
                            select_obj)
    op.execute(stmt)

    # drop old mapping tables
    op.drop_table('ofctenants')
    op.drop_table('ofcnetworks')
    op.drop_table('ofcports')
    op.drop_table('ofcfilters')
def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_table( "build_chroot", sa.Column("mock_chroot_id", sa.Integer(), nullable=False), sa.Column("build_id", sa.Integer(), nullable=False), sa.Column("status", sa.Integer(), nullable=True), sa.ForeignKeyConstraint( ["build_id"], ["build.id"], ), sa.ForeignKeyConstraint( ["mock_chroot_id"], ["mock_chroot.id"], ), sa.PrimaryKeyConstraint("mock_chroot_id", "build_id")) # transfer data from build table to build_chroot metadata = sa.MetaData() # just what we need of copr table build_table = sa.Table( "build", metadata, sa.Column("chroots", sa.Text()), sa.Column("status", sa.Integer()), sa.Column("id", sa.Integer()), ) mc_table = sa.Table( "mock_chroot", metadata, sa.Column("id", sa.Integer(), nullable=False), sa.Column("os_release", sa.String(length=50), nullable=False), sa.Column("os_version", sa.String(length=50), nullable=False), sa.Column("arch", sa.String(length=50), nullable=False), sa.Column("is_active", sa.Boolean(), nullable=False), ) bc_table = sa.Table( "build_chroot", metadata, sa.Column("mock_chroot_id", sa.Integer(), nullable=False), sa.Column("build_id", sa.Integer(), nullable=False), sa.Column("status", sa.Integer(), nullable=True), ) for row in op.get_bind().execute( sa.select([ build_table.c.id, build_table.c.chroots, build_table.c.status ])): for c in row[1].split(" "): chroot_array = c.split("-") for row2 in (op.get_bind().execute( sa.select([mc_table.c.id], sa.and_( mc_table.c.os_release == op.inline_literal( chroot_array[0]), mc_table.c.os_version == op.inline_literal( chroot_array[1]), mc_table.c.arch == op.inline_literal( chroot_array[2]), )))): # should be just one row op.bulk_insert(bc_table, [{ "mock_chroot_id": row2[0], "build_id": row[0], "status": row[2] }]) # drop old columns op.drop_column(u"build", u"status") op.drop_column(u"build", u"chroots")
def remove_new_config_entries(): op.execute(config_table.delete().where( config_table.c.name.in_(NEW_LDAP_CONFIG_ENTRIES) & (config_table.c.scope == op.inline_literal('rest'))))
"""creating default user accounts
def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('course', 'isStandard', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('event', 'isOnCalendar', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('service', 'isCalendarBlocked', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('sponsor', 'isLogoReceived', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('sponsor', 'isSponsorThankedFB', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('sponsor', 'isWebsiteUpdated', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.add_column('sponsorlevel', sa.Column('treatment', sa.String(length=32), nullable=True)) op.alter_column('sponsorlevel', 'display', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('sponsorrace', 'display', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('sponsorrace', 'isRDCertified', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('sponsortag', 'isBuiltIn', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('tag', 'isBuiltIn', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('user', 'active', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) # ### end Alembic commands ### sponsorlevel = table( 'sponsorlevel', column('treatment', sa.String()), ) op.execute( sponsorlevel.update().\ values({'treatment':op.inline_literal('summarize')}) )
def downgrade(): for name in names: op.execute(categories_table.delete().where( categories_table.columns.name == op.inline_literal(name)))
def downgrade(engine_name): for htype in ['UF', 'WF', 'AP']: op.execute(hazardcategory.delete().where( hazardcategory.c.hazardtype_id == sa.select( [hazardtype.c.id], hazardtype.c.mnemonic == op.inline_literal( htype))))
def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table( 'sponsortag', sa.Column('id', sa.Integer(), nullable=False), sa.Column('tag', sa.String(length=30), nullable=True), sa.Column('description', sa.String(length=512), nullable=True), sa.Column('isBuiltIn', sa.Boolean(), nullable=True), sa.Column('version_id', sa.Integer(), nullable=False), sa.PrimaryKeyConstraint('id')) op.alter_column('course', 'isStandard', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('event', 'isOnCalendar', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('service', 'isCalendarBlocked', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('sponsor', 'isLogoReceived', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('sponsor', 'isSponsorThankedFB', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('sponsor', 'isWebsiteUpdated', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('sponsorlevel', 'display', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.add_column('sponsorrace', sa.Column('display', sa.Boolean(), nullable=True)) op.alter_column('sponsorrace', 'isRDCertified', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('tag', 'isBuiltIn', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) op.alter_column('user', 'active', existing_type=mysql.TINYINT(display_width=1), type_=sa.Boolean(), existing_nullable=True) # ### end Alembic commands ### sponsor = table( 'sponsorrace', column('display', sa.Boolean()), ) op.execute( sponsor.update().\ values({'display':op.inline_literal(True)}) )
def _get_role_id(role_name): """ Return a SELECT statement that retrieves a role ID from a role name """ return sa.select([roles.c.id ]).where(roles.c.name == op.inline_literal(role_name))
def downgrade(): op.execute(questions.update().where( questions.c.type == op.inline_literal('multi_choice')).values( {'type': op.inline_literal('multichoice')}))
"instant": False, }, { # workflow starts in notification "name": "day_workflow_starts_in", "description": "Advanced notification for a recurring workflow.", "template": "day_workflow_starts_in", "advance_notice": 1, "instant": False, }, ] ) # modify weekly, monthly notification_types to week, month op.execute( notification_types.update(). where(notification_types.c.name == op.inline_literal('weekly_cycle_task_due_in')). values({'name': op.inline_literal('week_cycle_task_due_in')}) ) op.execute( notification_types.update(). where(notification_types.c.name == op.inline_literal('monthly_cycle_task_due_in')). values({'name': op.inline_literal('month_cycle_task_due_in')}) ) op.execute( notification_types.update(). where(notification_types.c.name == op.inline_literal('weekly_workflow_starts_in')). values({'name': op.inline_literal('week_workflow_starts_in'), 'template': op.inline_literal('week_workflow_starts_in')}) )
def upgrade(): op.execute(node.update().where(node.c.provision_state == null()).values( {'provision_state': op.inline_literal(AVAILABLE)}))
def upgrade_aliquot_type_units(): op.add_column('aliquottype', sa.Column('units', sa.String())) op.add_column('aliquot', sa.Column('amount', sa.Numeric())) aliquot_type_table = sa.sql.table( 'aliquottype', sa.sql.column('id'), sa.sql.column('name'), sa.sql.column('units')) aliquot_table = sa.sql.table( 'aliquot', sa.sql.column('aliquot_type_id'), sa.sql.column('cell_amount'), sa.sql.column('volume'), sa.sql.column('amount')) types = { 'cell_amount': { 'units': op.inline_literal(u'x10^6'), 'column': aliquot_table.c.cell_amount, }, 'volume': { 'units': op.inline_literal(u'mL'), 'column': aliquot_table.c.volume, }, 'each': { 'units': op.inline_literal(u'ea'), 'column': sa.case([ ((aliquot_table.c.cell_amount > op.inline_literal(0)), aliquot_table.c.cell_amount), ((aliquot_table.c.volume > op.inline_literal(0)), aliquot_table.c.volume) ]) }, } columns = { u'blood-spot': types['each'], u'csf': types['volume'], u'csfpellet': types['each'], u'gscells': types['cell_amount'], u'gsplasma': types['volume'], u'heparin-plasma': types['volume'], u'lymphoid': types['each'], u'pbmc': types['cell_amount'], u'plasma': types['volume'], u'rs-gut': types['each'], u'serum': types['volume'], u'swab': types['each'], u'ti-gut': types['each'], u'urine': types['volume'], u'wb-plasma': types['volume'], u'whole-blood': types['volume'], } op.execute( aliquot_type_table.update() .values( units=sa.case(value=aliquot_type_table.c.name, whens=[ (column_name, type_['units']) for column_name, type_ in columns.items() ], else_=sa.null())) ) op.execute( aliquot_table.update() .where(aliquot_type_table.c.id == aliquot_table.c.aliquot_type_id) .values( # Using the units of measure, pull in the correct value amount=sa.case(value=aliquot_type_table.c.name, whens=[ (column_name, type_['column']) for column_name, type_ in columns.items() ], else_=sa.null())) ) op.alter_column('aliquottype', 'units', nullable=False) op.drop_column('aliquot', 'cell_amount') op.drop_column('aliquot', 'volume')
def downgrade(): bind = op.get_bind() delete_seed = status_table.delete().where(status_table.c.id == op.inline_literal(6) ) bind.execute(delete_seed)
def _operation(from_, to): op.execute(table.update().where( table.c.engine == op.inline_literal(from_)).values( {'engine': op.inline_literal(to)}))
def downgrade(): op.execute(questions.update(). where(questions.c.type == op.inline_literal('single_choice_grid')). values({'type': op.inline_literal('rating_group')}))
def delete_permission(permission_name): execute( permissions.delete().\ where(permissions.c.permission_name==inline_literal(permission_name)) )
def upgrade(): bible_versions = table('bible_versions', column('id', sa.Integer), column('command', sa.String), column('name', sa.String), column('abbr', sa.String), column('service', sa.String), column('service_version', sa.String), column('rtl', sa.Boolean), column('books', sa.BigInteger)) op.add_column('bible_versions', sa.Column('books', sa.BigInteger, nullable=True)) op.execute(bible_versions.update() .where((bible_versions.c.command == op.inline_literal('esv')) | (bible_versions.c.command == op.inline_literal('kjv')) | (bible_versions.c.command == op.inline_literal('nasb')) | (bible_versions.c.command == op.inline_literal('niv')) | (bible_versions.c.command == op.inline_literal('csb')) | (bible_versions.c.command == op.inline_literal('net')) | (bible_versions.c.command == op.inline_literal('isv')) | (bible_versions.c.command == op.inline_literal('msg')) | (bible_versions.c.command == op.inline_literal('nlt')) | (bible_versions.c.command == op.inline_literal('gnv')) | (bible_versions.c.command == op.inline_literal('amp'))) .values({'books': 3})) op.execute(bible_versions.update() .where(bible_versions.c.command == op.inline_literal('sbl')) .values({'books': 2})) op.execute(bible_versions.update() .where(bible_versions.c.command == op.inline_literal('wlc')) .values({'books': 1})) op.execute(bible_versions.update() .where(bible_versions.c.command == op.inline_literal('nrsv')) .values({'books': 1048575})) op.execute(bible_versions.update() .where(bible_versions.c.command == op.inline_literal('kjva')) .values({'books': 532351})) op.execute(bible_versions.update() .where(bible_versions.c.command == op.inline_literal('lxx')) .values({'books': 4062975})) op.execute(bible_versions.update() .where(bible_versions.c.command == op.inline_literal('gnt')) .values({'books': 761855})) op.alter_column('bible_versions', 'books', nullable=False)
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Revert Boolean columns back to MySQL TINYINT(1).
    tinyint_reversions = [
        ('user', 'active'),
        ('tag', 'isBuiltIn'),
        ('sponsorrace', 'isRDCertified'),
        ('sponsorlevel', 'display'),
        ('sponsor', 'isWebsiteUpdated'),
        ('sponsor', 'isSponsorThankedFB'),
        ('sponsor', 'isLogoReceived'),
        ('service', 'isCalendarBlocked'),
        ('event', 'isOnCalendar'),
        ('course', 'isStandard'),
    ]
    for table_name, column_name in tinyint_reversions:
        op.alter_column(table_name, column_name,
                        existing_type=sa.Boolean(),
                        type_=mysql.TINYINT(display_width=1),
                        existing_nullable=True)

    op.drop_column('client', 'notes')
    op.add_column(
        'sponsor',
        sa.Column('isRegSiteUpdated', mysql.TINYINT(display_width=1),
                  autoincrement=False, nullable=True))

    # Map the RegSiteUpdated enum back onto the boolean flag ('n/a' rows
    # are left NULL), then drop the enum column.
    sponsor = table('sponsor',
                    column('isRegSiteUpdated', sa.Boolean()),
                    column('RegSiteUpdated', sa.Enum('no', 'yes', 'n/a')))
    for enum_value, flag_value in (('no', 0), ('yes', 1)):
        op.execute(
            sponsor.update()
            .where(sponsor.c.RegSiteUpdated == op.inline_literal(enum_value))
            .values({'isRegSiteUpdated': op.inline_literal(flag_value)}))
    op.drop_column('sponsor', 'RegSiteUpdated')
def downgrade(engine_name): op.execute(hazardcategory.delete( ).where(hazardcategory.c.hazardtype_id == sa.select( [hazardtype.c.id], hazardtype.c.mnemonic == op.inline_literal('EH')))) pass
def upgrade():
    """Replace the avalanche_slope enum's fine-grained buckets with
    coarser ones, converting existing xreports/xreports_archives values.

    Because PostgreSQL enums can't simply drop values, this creates a
    temporary type/column ('avalanche_slope_'), copies converted data
    into it, drops the old column and type, then renames both back.
    """
    # Old bucket -> new bucket (several old buckets collapse into one).
    conversions = [('slope_lt_30', 'slope_lt_30'),
                   ('slope_30_32', 'slope_30_35'),
                   ('slope_33_35', 'slope_30_35'),
                   ('slope_36_38', 'slope_35_40'),
                   ('slope_39_41', 'slope_35_40'),
                   ('slope_42_44', 'slope_40_45'),
                   ('slope_45_47', 'slope_gt_45'),
                   ('slope_gt_47', 'slope_gt_45')]

    old_options = ('slope_lt_30', 'slope_30_32', 'slope_33_35',
                   'slope_36_38', 'slope_39_41', 'slope_42_44',
                   'slope_45_47', 'slope_gt_47')
    new_options = ('slope_lt_30', 'slope_30_35', 'slope_35_40',
                   'slope_40_45', 'slope_gt_45')
    old_type = sa.Enum(*old_options, name='avalanche_slope',
                       schema='guidebook')
    new_type = sa.Enum(*new_options, name='avalanche_slope_',
                       schema='guidebook')
    new_type.create(op.get_bind(), checkfirst=False)

    # Create new column with temporary name
    op.add_column('xreports',
                  sa.Column('avalanche_slope_', new_type, nullable=True),
                  schema='guidebook')
    op.add_column('xreports_archives',
                  sa.Column('avalanche_slope_', new_type, nullable=True),
                  schema='guidebook')

    # Fill new column using the old 'avalanche_slope' column values
    xreports = Table('xreports', MetaData(),
                     sa.Column('avalanche_slope', old_type, nullable=True),
                     sa.Column('avalanche_slope_', new_type, nullable=True),
                     schema='guidebook')
    for (old_value, new_value) in conversions:
        op.execute(
            xreports.update().
            where(xreports.c.avalanche_slope == op.inline_literal(old_value)).
            values({'avalanche_slope_': op.inline_literal(new_value)})
        )

    archives = Table('xreports_archives', MetaData(),
                     sa.Column('avalanche_slope', old_type, nullable=True),
                     sa.Column('avalanche_slope_', new_type, nullable=True),
                     schema='guidebook')
    for (old_value, new_value) in conversions:
        op.execute(
            archives.update().
            where(archives.c.avalanche_slope == op.inline_literal(old_value)).
            values({'avalanche_slope_': op.inline_literal(new_value)})
        )

    # Drop old column and enum
    op.drop_column('xreports', 'avalanche_slope', schema='guidebook')
    op.drop_column('xreports_archives', 'avalanche_slope',
                   schema='guidebook')
    drop_enum('avalanche_slope', schema='guidebook')

    # Rename enum
    op.execute(
        'ALTER TYPE guidebook.avalanche_slope_ RENAME TO avalanche_slope')

    # Rename column
    op.alter_column('xreports', 'avalanche_slope_',
                    new_column_name='avalanche_slope', schema='guidebook')
    op.alter_column('xreports_archives', 'avalanche_slope_',
                    new_column_name='avalanche_slope', schema='guidebook')