def downgrade():
    op.drop_table('property_key')
    op.drop_table('company')
    op.drop_table('staff')
    op.drop_table('email_temp_model')
    op.drop_table('user')
    op.drop_table('document')
    op.drop_table('company_entity_properties')
    op.drop_table('staff_entity_properties')
    op.drop_table('cross')
    op.drop_table('project')
    op.drop_table('task')
    op.drop_table('task_entity_properties')
    op.drop_table('member')
    op.drop_table('task_out_cross')
    op.drop_table('cross_document')
    op.drop_table('member_entity_properties')
    op.drop_table('project_entity_properties')
    op.drop_table('cross_in_task')
    op.drop_table('meta_properties')
    op.drop_table('user_entity_properties')
    sa.Sequence('task_id_seq').drop(bind=op.get_bind())
    sa.Sequence('cross_id_seq').drop(bind=op.get_bind())
    sa.Sequence('project_id_seq').drop(bind=op.get_bind())
    sa.Sequence('user_id_seq').drop(bind=op.get_bind())
    sa.Sequence('staff_id_seq').drop(bind=op.get_bind())
    sa.Sequence('member_id_seq').drop(bind=op.get_bind())
    sa.Sequence('company_id_seq').drop(bind=op.get_bind())
    sa.Sequence('document_id_seq').drop(bind=op.get_bind())
    sa.Sequence('email_temp_id_seq').drop(bind=op.get_bind())
    ENUM(name='user_gender_enum').drop(op.get_bind(), checkfirst=False)
Example 2
def upgrade_enum(table, column_name, enum_name, old_options, new_options):
    """Swap the values of a PostgreSQL enum in place via a temporary type."""
    old_type = sa.Enum(*old_options, name=enum_name)
    new_type = sa.Enum(*new_options, name=enum_name)
    tmp_type = sa.Enum(*new_options, name="_" + enum_name)
    # Create a temporary type, convert and drop the "old" type
    tmp_type.create(op.get_bind(), checkfirst=False)
    op.execute(
        u'ALTER TABLE {0} ALTER COLUMN {1} TYPE _{2}'
        u' USING {1}::text::_{2}'.format(
            table,
            column_name,
            enum_name
        )
    )
    old_type.drop(op.get_bind(), checkfirst=False)
    # Create and convert to the "new" type
    new_type.create(op.get_bind(), checkfirst=False)
    op.execute(
        u'ALTER TABLE {0} ALTER COLUMN {1} TYPE {2}'
        u' USING {1}::text::{2}'.format(
            table,
            column_name,
            enum_name
        )
    )
    tmp_type.drop(op.get_bind(), checkfirst=False)
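
A minimal usage sketch for upgrade_enum (the table, column, and option lists below are hypothetical, not from the source):

def upgrade():
    # Hypothetical: add a 'SUSPENDED' value to an existing enum column.
    upgrade_enum(
        table='account',
        column_name='status',
        enum_name='account_status_enum',
        old_options=('ACTIVE', 'CLOSED'),
        new_options=('ACTIVE', 'CLOSED', 'SUSPENDED'),
    )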
def downgrade():
    op.drop_table('contact_roles')
    Enum(name="role_types").drop(op.get_bind(), checkfirst=False)
    op.drop_table('contact_phones')
    Enum(name="phone_types").drop(op.get_bind(), checkfirst=False)
    op.drop_table('contact_roles_association')
    op.drop_table('contact')
Example 4
def upgrade():
    command = text("""
        UPDATE yelp_category
        SET start_time = '08:00:00', end_time = '11:00:00'
        WHERE name = 'breakfast'
    """)
    op.get_bind().execute(command)

    command = text("""
        UPDATE yelp_category
        SET start_time = '11:00:00', end_time = '14:00:00'
        WHERE name = 'lunch'
    """)
    op.get_bind().execute(command)

    command = text("""
        UPDATE yelp_category
        SET start_time = '14:00:00', end_time = '18:00:00'
        WHERE name = 'attraction'
    """)
    op.get_bind().execute(command)

    command = text("""
        UPDATE yelp_category
        SET start_time = '18:00:00', end_time = '20:00:00'
        WHERE name = 'dinner'
    """)
    op.get_bind().execute(command)

    command = text("""
        UPDATE yelp_category
        SET start_time = '20:00:00', end_time = '00:00:00'
        WHERE name = 'nightlife'
    """)
    op.get_bind().execute(command)
def downgrade():
    if not exists_in_db(op.get_bind(), 'mailinglist', 'header_matches'):
        # SQLite will not have deleted the former column, since it does not
        # support column deletion.
        op.add_column(
            'mailinglist',
            sa.Column('header_matches', sa.PickleType, nullable=True))
    # Now migrate the data.  It can't be offline because we need to read the
    # pickles.
    connection = op.get_bind()
    # Don't import the table definition from the models, it may break this
    # migration when the model is updated in the future (see the Alembic doc).
    mlist_table = sa.sql.table(
        'mailinglist',
        sa.sql.column('id', sa.Integer),
        sa.sql.column('header_matches', sa.PickleType)
        )
    header_match_table = sa.sql.table(
        'headermatch',
        sa.sql.column('mailing_list_id', sa.Integer),
        sa.sql.column('header', sa.Unicode),
        sa.sql.column('pattern', sa.Unicode),
        )
    for mlist_id, header, pattern in connection.execute(
            header_match_table.select()).fetchall():
        mlist = connection.execute(mlist_table.select().where(
            mlist_table.c.id == mlist_id)).fetchone()
        header_matches = mlist['header_matches']
        if not header_matches:
            header_matches = []
        header_matches.append((header, pattern))
        connection.execute(mlist_table.update().where(
            mlist_table.c.id == mlist_id).values(
            header_matches=header_matches))
    op.drop_table('headermatch')
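
The exists_in_db() helper used above is not shown in this snippet; a plausible sketch using SQLAlchemy's inspector (an assumption, not the migration's actual helper):

from sqlalchemy.engine import reflection

def exists_in_db(bind, table_name, column_name):
    # Hypothetical helper: report whether the column already exists.
    inspector = reflection.Inspector.from_engine(bind)
    return column_name in [c['name'] for c in inspector.get_columns(table_name)]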
def upgrade():
    # Add a new "comparable" column, defaulting to true.
    with op.batch_alter_table('answer', naming_convention=convention) as batch_op:
        batch_op.add_column(sa.Column('comparable', sa.Integer(), nullable=False, default=True, server_default='1'))

    # Patch existing answers from instructors and TAs as non-comparable.
    # Note that existing answers from sys admin are considered comparable (i.e. no need to patch).
    # SQLite doesn't support an IN clause over multiple columns...
    # update = text(
    #     "UPDATE answer SET comparable = 0 "
    #     "WHERE (assignment_id, user_id) IN ( "
    #     "  SELECT a.id, uc.user_id "
    #     "  FROM user_course uc "
    #     "  JOIN assignment a "
    #     "    ON a.course_id = uc.course_id "
    #     "  WHERE uc.course_role IN ('Instructor', 'Teaching Assistant'))"
    # )
    # ... use a potentially slower query
    update = text(
        "UPDATE answer SET comparable = 0 "
        "WHERE EXISTS ( "
        "  SELECT 1 "
        "  FROM user_course "
        "  JOIN assignment "
        "    ON assignment.course_id = user_course.course_id "
        "  WHERE "
        "    assignment.id = answer.assignment_id "
        "    AND user_course.user_id = answer.user_id "
        "    AND user_course.course_role IN ('Instructor', 'Teaching Assistant'))"
    )
    op.get_bind().execute(update)
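
Several of these migrations pass naming_convention=convention to op.batch_alter_table; the mapping is defined elsewhere, but a typical definition (an assumption, not shown in this source) that yields names like fk_Questions_selfevaltype_id_SelfEvalTypes is:

convention = {
    "ix": "ix_%(column_0_label)s",
    "uq": "uq_%(table_name)s_%(column_0_name)s",
    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    "pk": "pk_%(table_name)s",
}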
def upgrade():
    enum_roles = Enum("SALES", "ACCOUNTS", name='role_types')
    enum_roles.create(op.get_bind(), checkfirst=False)

    op.create_table('contact_roles',
                    Column('id', Integer, primary_key=True),
                    Column('role', enum_roles)
                    )

    enum_phones = Enum("OFFICE", "MOBILE", "OTHER", name='phone_types')
    enum_phones.create(op.get_bind(), checkfirst=False)

    op.create_table('contact_phones',
                    Column('id', Integer, primary_key=True,),
                    Column('contact_id', Integer, ForeignKey('contact.id')),
                    Column('type', enum_phones),
                    Column('number', String)
                    )

    op.create_table('contact_roles_association',
                    Column('contact_id', Integer, ForeignKey('contact.id')),
                    Column('contact_roles_id', Integer, ForeignKey('contact_roles.id'))
                    )

    op.create_table('contact',
                    Column('id', Integer, primary_key=True),
                    Column('customer_id', Integer, ForeignKey('customer.id')),
                    Column('firstname', String),
                    Column('lastname', String),
                    Column('email', String),
                    )
def downgrade():
    # FIXME: the replacement below adds extraneous commas, so the data
    # migration is disabled for now.
    return
    log = sa.table('log', sa.column('type', sa.String), sa.column('msg', sa.String))
    rows = op.get_bind().execute(log.select().where(log.c.type == 'kick')).fetchall()
    values = [{'old_msg': x.msg, 'msg': x.msg.replace(' ', ',', 1)} for x in rows]
    op.get_bind().execute(log.update().where(log.c.msg == sa.bindparam('old_msg')).values(msg=sa.bindparam('msg')), values)
Example 9
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    from sqlalchemy.sql import text
    op.get_bind().execute(text("DELETE FROM role WHERE name IN ('organization_view', 'organization_create', 'organization_edit','organization_delete')"))
    op.drop_constraint('supplier_organization_id_fkey', 'supplier', type_='foreignkey')
    op.drop_column('supplier', 'organization_id')
    op.drop_constraint('shipping_organization_id_fkey', 'shipping', type_='foreignkey')
    op.drop_column('shipping', 'organization_id')
    op.drop_constraint('sales_order_organization_id_fkey', 'sales_order', type_='foreignkey')
    op.drop_column('sales_order', 'organization_id')
    op.drop_constraint('receiving_organization_id_fkey', 'receiving', type_='foreignkey')
    op.drop_column('receiving', 'organization_id')
    op.drop_constraint('purchase_order_organization_id_fkey', 'purchase_order', type_='foreignkey')
    op.drop_column('purchase_order', 'organization_id')
    op.drop_constraint('product_category_organization_id_fkey', 'product_category', type_='foreignkey')
    op.drop_column('product_category', 'organization_id')
    op.drop_constraint('product_organization_id_fkey', 'product', type_='foreignkey')
    op.drop_column('product', 'organization_id')
    op.drop_constraint('preference_organization_id_fkey', 'preference', type_='foreignkey')
    op.drop_column('preference', 'organization_id')
    op.drop_constraint('inventory_transaction_organization_id_fkey', 'inventory_transaction', type_='foreignkey')
    op.drop_column('inventory_transaction', 'organization_id')
    op.drop_constraint('incoming_organization_id_fkey', 'incoming', type_='foreignkey')
    op.drop_column('incoming', 'organization_id')
    op.drop_constraint('expense_organization_id_fkey', 'expense', type_='foreignkey')
    op.drop_column('expense', 'organization_id')
    op.drop_constraint('customer_organization_id_fkey', 'customer', type_='foreignkey')
    op.drop_column('customer', 'organization_id')
    op.drop_constraint('user_organization_id_fkey', 'user', type_='foreignkey')
    op.drop_column('user', 'organization_id')
    op.drop_table('organization')
def downgrade():
    ########################## drop columns ##########################

    op.drop_column('ps_aors', 'support_path')
    op.drop_column('ps_aors', 'outbound_proxy')
    op.drop_column('ps_aors', 'maximum_expiration')

    op.drop_column('ps_contacts', 'path')
    op.drop_column('ps_contacts', 'outbound_proxy')

    op.alter_column('ps_endpoints', 'mwi_from_user',
                    new_column_name='mwi_fromuser',
                    existing_type=sa.String(40))

    op.drop_column('ps_endpoints', 'set_var')
    op.drop_column('ps_endpoints', 'redirect_method')
    op.drop_column('ps_endpoints', 'media_address')

    ########################## drop tables ###########################

    op.drop_table('ps_registrations')
    op.drop_table('ps_transports')
    op.drop_table('ps_globals')
    op.drop_table('ps_systems')

    ########################## drop enums ############################

    sa.Enum(name=PJSIP_TRANSPORT_PROTOCOL_NAME).drop(
        op.get_bind(), checkfirst=False)
    sa.Enum(name=PJSIP_TRANSPORT_METHOD_NAME).drop(
        op.get_bind(), checkfirst=False)
    sa.Enum(name=PJSIP_REDIRECT_METHOD_NAME).drop(
        op.get_bind(), checkfirst=False)
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    ENUM(name="standardtekst_types").drop(op.get_bind(), checkfirst=True)
    new_types_enum = sa.Enum('Andre opplysninger',
                             u'Merk følgende', name='standardtekst_types')
    new_types_enum.create(op.get_bind(), checkfirst=True)
    op.add_column('standardtekst', sa.Column('type', new_types_enum, nullable=True))
def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.add_column('ml2_port_bindings',
                  sa.Column('vif_details', sa.String(length=4095),
                            nullable=False, server_default=''))
    if op.get_bind().engine.name == 'ibm_db_sa':
        op.execute(
            "UPDATE ml2_port_bindings SET"
            " vif_details = '{\"port_filter\": true}'"
            " WHERE cap_port_filter = 1")
        op.execute(
            "UPDATE ml2_port_bindings SET"
            " vif_details = '{\"port_filter\": false}'"
            " WHERE cap_port_filter = 0")
    else:
        op.execute(
            "UPDATE ml2_port_bindings SET"
            " vif_details = '{\"port_filter\": true}'"
            " WHERE cap_port_filter = true")
        op.execute(
            "UPDATE ml2_port_bindings SET"
            " vif_details = '{\"port_filter\": false}'"
            " WHERE cap_port_filter = false")
    op.drop_column('ml2_port_bindings', 'cap_port_filter')
    if op.get_bind().engine.name == 'ibm_db_sa':
        op.execute("CALL SYSPROC.ADMIN_CMD('REORG TABLE ml2_port_bindings')")
def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    if op.get_bind().engine.name == 'ibm_db_sa':
        # Note(xuhanp): DB2 doesn't allow a nullable=False column without a
        # DEFAULT clause, so server_default is used. Using sa.text results
        # in "DEFAULT 0" for cap_port_filter.
        op.add_column('ml2_port_bindings',
                      sa.Column('cap_port_filter', sa.Boolean(),
                                nullable=False,
                                server_default=sa.text("0")))
        op.execute(
            "UPDATE ml2_port_bindings SET"
            " cap_port_filter = 1"
            " WHERE vif_details LIKE '%\"port_filter\": true%'")
    else:
        op.add_column('ml2_port_bindings',
                      sa.Column('cap_port_filter', sa.Boolean(),
                                nullable=False,
                                server_default=sa.text("false")))
        op.execute(
            "UPDATE ml2_port_bindings SET"
            " cap_port_filter = true"
            " WHERE vif_details LIKE '%\"port_filter\": true%'")
    op.drop_column('ml2_port_bindings', 'vif_details')
    if op.get_bind().engine.name == 'ibm_db_sa':
        op.execute("CALL SYSPROC.ADMIN_CMD('REORG TABLE ml2_port_bindings')")
def downgrade():
    # revert table Judgements
    op.add_column("Judgements", sa.Column('criteriaandcourses_id', sa.Integer(), nullable=True))
    # populate column with the judgements' criteriaandcourses_id
    update = text(
        # Rewritten as a subquery to support SQLite; needs more testing to verify.
        "UPDATE Judgements "
        "SET criteriaandcourses_id = "
        "(SELECT cc.id FROM CriteriaAndCourses cc "
        "JOIN CriteriaAndQuestions cq ON cq.criteria_id = cc.criteria_id  "
        "JOIN Questions q ON cq.questions_id = q.id "
        "JOIN Posts p ON q.posts_id = p.id "
        "WHERE Judgements.criteriaandquestions_id = cq.id AND p.courses_id = cc.courses_id)"
        # "UPDATE Judgements j " + \
        # "JOIN CriteriaAndQuestions cq ON j.criteriaandquestions_id = cq.id " + \
        # "JOIN Questions q ON cq.questions_id = q.id " + \
        # "JOIN Posts p ON q.posts_id = p.id " + \
        # "JOIN CriteriaAndCourses cc ON cq.criteria_id = cc.criteria_id AND p.courses_id = cc.courses_id " + \
        # "SET j.criteriaandcourses_id = cc.id"
    )
    op.get_bind().execute(update)
    with op.batch_alter_table('Judgements', naming_convention=convention) as batch_op:
        batch_op.create_foreign_key('fk_Judgements_criteriaandcourses_id_CriteriaAndCourses', 'CriteriaAndCourses',
                                    ['criteriaandcourses_id'], ['id'], ondelete="CASCADE")
        batch_op.alter_column('criteriaandcourses_id', nullable=False, existing_type=sa.Integer())
        batch_op.drop_constraint('fk_Judgements_criteriaandquestions_id_CriteriaAndQuestions',
                                 'foreignkey')
        # batch_op.drop_index("criteriaandquestions_id")
        batch_op.drop_column("criteriaandquestions_id")

    # revert table Scores
    op.add_column('Scores', sa.Column('criteriaandcourses_id', sa.Integer(), nullable=True))
    # populate column with the scores' criteriaandcourses_id
    update = text(
        # Rewritten as a subquery to support SQLite; needs more testing to verify.
        "UPDATE Scores "
        "SET criteriaandcourses_id = "
        "(SELECT cc.id FROM CriteriaAndCourses cc "
        "JOIN CriteriaAndQuestions cq ON cq.criteria_id = cc.criteria_id "
        "JOIN Questions q ON cq.questions_id = q.id "
        "JOIN Posts p ON q.posts_id = p.id "
        "WHERE Scores.criteriaandquestions_id = cq.id AND p.courses_id = cc.courses_id)"
        # "UPDATE Scores s " + \
        # "JOIN CriteriaAndQuestions cq ON s.criteriaandquestions_id = cq.id " + \
        # "JOIN Questions q ON cq.questions_id = q.id " + \
        # "JOIN Posts p ON q.posts_id = p.id " + \
        # "JOIN CriteriaAndCourses cc ON cq.criteria_id = cc.criteria_id AND p.courses_id = cc.courses_id " + \
        # "SET s.criteriaandcourses_id = cc.id"
    )
    op.get_bind().execute(update)
    with op.batch_alter_table('Scores', naming_convention=convention) as batch_op:
        batch_op.create_foreign_key('fk_Scores_criteriaandcourses_id_CriteriaAndCourses', 'CriteriaAndCourses',
                                    ['criteriaandcourses_id'], ['id'], ondelete="CASCADE")
        batch_op.alter_column('criteriaandcourses_id', nullable=False, existing_type=sa.Integer())
        batch_op.drop_constraint('fk_Scores_criteriaandquestions_id_CriteriaAndQuestions', 'foreignkey')
        # batch_op.drop_index('criteriaandquestions_id')
        batch_op.drop_column('criteriaandquestions_id')

    # drop table CriteriaAndQuestions
    op.drop_table('CriteriaAndQuestions')
def upgrade():
    if driver_name == 'postgresql':
        temp_credits_role.create(op.get_bind(), checkfirst=False)
        op.execute(
            'ALTER TABLE credits ALTER COLUMN role TYPE temp_credits_role'
            ' USING role::text::temp_credits_role'
        )
        old_credits_role.drop(op.get_bind(), checkfirst=False)
        new_credits_role.create(op.get_bind(), checkfirst=False)
        op.execute(
            'ALTER TABLE credits ALTER COLUMN role TYPE credits_role'
            ' USING role::text::credits_role'
        )
        temp_credits_role.drop(op.get_bind(), checkfirst=False)
    else:
        op.alter_column(
            'credits',
            'role',
            existing_type=old_credits_role,
            type_=new_credits_role,
        )

    op.drop_constraint('credits_pkey', 'credits', 'primary')
    op.create_primary_key('credits_pkey', 'credits',
                          ['work_id', 'person_id', 'role'])
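
The names driver_name, old_credits_role, new_credits_role and temp_credits_role come from elsewhere in the migration module; hypothetical definitions consistent with the code above (the actual enum values are not shown in the source):

old_credits_role = sa.Enum('author', 'editor', name='credits_role')
new_credits_role = sa.Enum('author', 'editor', 'translator', name='credits_role')
temp_credits_role = sa.Enum('author', 'editor', 'translator', name='temp_credits_role')
# driver_name is typically read off the connection inside upgrade():
#     driver_name = op.get_bind().dialect.name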
def upgrade():
    op.create_table(
        'SelfEvalTypes',
        sa.Column('id', sa.Integer(), nullable=True),
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name'),
        mysql_charset='utf8',
        mysql_collate='utf8_unicode_ci',
        mysql_engine='InnoDB'
    )

    # populate table with a self evaluation type
    insert = text(
        "INSERT INTO SelfEvalTypes (name) " +
        "VALUES ('No Comparison with Another Answer')"
    )
    op.get_bind().execute(insert)

    op.add_column(u'Questions', sa.Column('selfevaltype_id', sa.Integer(), nullable=True))

    with op.batch_alter_table('Questions', naming_convention=convention) as batch_op:
        batch_op.create_foreign_key(
            'fk_Questions_selfevaltype_id_SelfEvalTypes', 'SelfEvalTypes',
            ['selfevaltype_id'], ['id'], ondelete="CASCADE")
def upgrade():
    op.add_column('builds', sa.Column('semrel', custom_types.Semver(),
                                      nullable=True))

    build = sa.Table("builds", metadata,
                     sa.Column("id", sa.Integer),
                     sa.Column("base_package_name", sa.String(length=64)),
                     sa.Column("projrelease_id", sa.Integer),
                     sa.Column("epoch", sa.Integer),
                     sa.Column("version", sa.String(length=64)),
                     sa.Column("release", sa.String(length=64)),
                     sa.Column("semver", custom_types.Semver()),
                     sa.Column("semrel", custom_types.Semver()),
                     )

    for b in op.get_bind().execute(sa.select([build.c.id, build.c.release])):
        bid, brel = b
        brel = custom_types.to_semver(brel)
        op.get_bind().execute((build.update()
                               .where(build.c.id == bid)
                               .values(semrel=sa.func.to_semver(brel))))

    op.alter_column('builds', 'semrel', existing_type=custom_types.Semver(),
                    nullable=False)

    op.create_index('ix_builds_semrel', 'builds', ['semrel'])
def upgrade():

    if not migration.schema_has_table('ml2_port_bindings'):
        # In the database we are migrating from, the configured plugin
        # did not create the ml2_port_bindings table.
        return

    op.add_column('ml2_port_bindings',
                  sa.Column('vif_details', sa.String(length=4095),
                            nullable=False, server_default=''))
    if op.get_bind().engine.name == 'ibm_db_sa':
        op.execute(
            "UPDATE ml2_port_bindings SET"
            " vif_details = '{\"port_filter\": true}'"
            " WHERE cap_port_filter = 1")
        op.execute(
            "UPDATE ml2_port_bindings SET"
            " vif_details = '{\"port_filter\": false}'"
            " WHERE cap_port_filter = 0")
    else:
        op.execute(
            "UPDATE ml2_port_bindings SET"
            " vif_details = '{\"port_filter\": true}'"
            " WHERE cap_port_filter = true")
        op.execute(
            "UPDATE ml2_port_bindings SET"
            " vif_details = '{\"port_filter\": false}'"
            " WHERE cap_port_filter = false")
    op.drop_column('ml2_port_bindings', 'cap_port_filter')
    if op.get_bind().engine.name == 'ibm_db_sa':
        op.execute("CALL SYSPROC.ADMIN_CMD('REORG TABLE ml2_port_bindings')")
def upgrade():

    if op.get_context().dialect.name == 'postgresql':
        # INFO - G.M - 2018-11-27 - To modify an enum type in PostgreSQL we
        # create a new type, set the column to the new type and drop the old one.
        op.execute("ALTER TYPE authtype RENAME TO authtype_old;")
        op.execute("ALTER TABLE users alter auth_type drop default;")
        enum = sa.Enum(*new_auth_type_list, name='authtype')
        enum.create(op.get_bind(), checkfirst=False)
        with op.batch_alter_table('users') as batch_op:
            batch_op.alter_column(
                'auth_type',
                type_=enum,
                postgresql_using="auth_type::text::authtype",
                server_default='INTERNAL'
            )
        op.execute("DROP TYPE authtype_old;")
    else:
        # INFO - G.M - 2018-11-27 - MYSQL case
        enum = sa.Enum(*new_auth_type_list, name='authtype')
        enum.create(op.get_bind(), checkfirst=False)
        with op.batch_alter_table('users') as batch_op:
            batch_op.alter_column(
                'auth_type',
                type_=enum,
            )
def upgrade():
    """Upgrade the database."""
    ALARM_ACTION_ENUM.create(op.get_bind())

    op.add_column("chore", sa.Column("due_at", sa.TIMESTAMP(timezone=True), nullable=True))
    op.add_column("chore", sa.Column("alarm_at", sa.TIMESTAMP(timezone=True), nullable=True))
    add_required_column("chore", "created_at", sa.TIMESTAMP(timezone=True), "alarm_start")
    add_required_column("chore", "updated_at", sa.TIMESTAMP(timezone=True), "alarm_start")
    op.drop_column("chore", "alarm_start")

    op.execute(
        """UPDATE chore
SET due_at = alarm.next_at,
  alarm_at = coalesce(alarm.original_at, alarm.next_at)
FROM alarm
WHERE alarm.chore_id = chore.id AND alarm.current_state = 'unseen';
"""
    )

    # Alarm date should not be inferior to the due date.
    op.execute(
        """UPDATE chore
SET alarm_at = due_at
WHERE alarm_at < chore.due_at
"""
    )

    op.add_column("alarm", sa.Column("due_at", sa.TIMESTAMP(timezone=True), nullable=True))
    op.add_column("alarm", sa.Column("alarm_at", sa.TIMESTAMP(timezone=True), nullable=True))
    add_required_column("alarm", "action", ALARM_ACTION_ENUM, "'snooze'")
    op.add_column("alarm", sa.Column("snooze_repetition", sa.String(), nullable=True))
    add_required_column("alarm", "created_at", sa.TIMESTAMP(timezone=True), "next_at")

    op.execute(
        """UPDATE alarm
SET due_at          = next_at,
  alarm_at          = original_at,
  snooze_repetition = last_snooze,
  "action"          = CASE current_state
                      WHEN 'skipped'
                        THEN 'jump'
                      WHEN 'snoozed'
                        THEN 'snooze'
                      WHEN 'completed'
                        THEN 'complete'
                      WHEN 'killed'
                        THEN 'pause'
                      ELSE 'snooze'
                      END :: alarm_action_enum
"""
    )

    op.drop_column("alarm", "next_at")
    op.drop_column("alarm", "original_at")
    op.drop_column("alarm", "last_snooze")
    op.drop_column("alarm", "current_state")
    ALARM_STATE_ENUM.drop(op.get_bind())

    op.drop_table("roles")
    op.drop_table("users")
def downgrade():
    op.add_column("trades", sa.Column("completed", sa.Boolean, nullable=False, serverdefault=False))
    op.alter_column("trades", "completed", serverdefault=None)

    op.get_bind().execute(trades.update().where(trades.c.completed_date.isnot(None)).values({"completed": True}))

    op.drop_column("trades", "completed_date")
Example 22
def upgrade():
    op.create_table(
        'QuestionsAndSelfEvalTypes',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('questions_id', sa.Integer(), nullable=False),
        sa.Column('selfevaltypes_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['questions_id'], ['Questions.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['selfevaltypes_id'], ['SelfEvalTypes.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        mysql_charset='utf8',
        mysql_collate='utf8_unicode_ci',
        mysql_engine='InnoDB'
    )

    # populate table above
    populate = text(
        "INSERT INTO QuestionsAndSelfEvalTypes (questions_id, selfevaltypes_id) " +
        "SELECT q.id, q.selfevaltype_id " +
        "FROM Questions as q " +
        "WHERE q.selfevaltype_id IS NOT NULL"
    )
    op.get_bind().execute(populate)

    # drop selfevaltype_id foreign key
    with op.batch_alter_table('Questions', naming_convention=convention) as batch_op:
        batch_op.drop_constraint('fk_Questions_selfevaltype_id_SelfEvalTypes', 'foreignkey')
        # drop key/index + column
        # batch_op.drop_index("selfevaltype_id")
        batch_op.drop_column("selfevaltype_id")

    op.add_column('PostsForJudgements',
                  sa.Column('selfeval', sa.Boolean(name='selfeval'), nullable=False, server_default='0', default=False))
Example 23
def upgrade():
    from sqlalchemy.sql import text
    enum_values_table = sa.table('enum_values',
                                 sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
                                 sa.Column('type_id', sa.Integer(), nullable=True),
                                 sa.Column('code', sa.String(length=32), nullable=True),
                                 sa.Column('display', sa.String(length=64), nullable=False),
                                 )
    res = op.get_bind().execute(text('SELECT max(id)+1 FROM enum_values'))
    results = res.fetchall()
    cm = 14
    for r in results:
        if r[0] is not None:
            cm = r[0]
    op.bulk_insert(enum_values_table, [
        {'id': cm, 'type_id': 1, 'code': 'INVENTORY_TRANSACTION_TYPE', 'display': u'库存变动类型'},
        {'id': cm + 1, 'type_id': 1, 'code': 'RECEIVING_STATUS', 'display': u'收货单状态'},
        {'id': cm + 2, 'type_id': 1, 'code': 'PURCHASE_ORDER_STATUS', 'display': u'采购单状态'},
        {'id': cm + 3, 'type_id': 1, 'code': 'SHIPPING_STATUS', 'display': u'发货单状态'},
        {'id': cm + 4, 'type_id': cm, 'code': 'PURCHASE_IN', 'display': u'采购入库'},
        {'id': cm + 5, 'type_id': cm, 'code': 'SALES_OUT', 'display': u'销售出库'},
        {'id': cm + 6, 'type_id': cm, 'code': 'INVENTORY_DAMAGED', 'display': u'商品损毁'},
        {'id': cm + 7, 'type_id': cm, 'code': 'INVENTORY_LOST', 'display': u'商品丢失'},
        {'id': cm + 8, 'type_id': cm + 1, 'code': 'RECEIVING_DRAFT', 'display': u'收货单草稿'},
        {'id': cm + 9, 'type_id': cm + 1, 'code': 'RECEIVING_COMPLETE', 'display': u'收货单完成'},
        {'id': cm + 10, 'type_id': cm + 2, 'code': 'PURCHASE_ORDER_DRAFT', 'display': u'草稿'},
        {'id': cm + 11, 'type_id': cm + 2, 'code': 'PURCHASE_ORDER_ISSUED', 'display': u'已发出'},
        {'id': cm + 12, 'type_id': cm + 2, 'code': 'PURCHASE_ORDER_PART_RECEIVED', 'display': u'部分收货'},
        {'id': cm + 13, 'type_id': cm + 2, 'code': 'PURCHASE_ORDER_RECEIVED', 'display': u'收货完成'},
        {'id': cm + 14, 'type_id': cm + 3, 'code': 'SHIPPING_COMPLETE', 'display': u'发货完成'},
    ], multiinsert=False)
    op.get_bind().execute(text("ALTER SEQUENCE enum_values_id_seq RESTART WITH " + str(cm + 15) + ";"))
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('api_keys_history',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('name', sa.String(length=255), nullable=False),
    sa.Column('secret', sa.String(length=255), nullable=False),
    sa.Column('service_id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('expiry_date', sa.DateTime(), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('created_by_id', postgresql.UUID(as_uuid=True), nullable=True),
    sa.Column('version', sa.Integer(), autoincrement=False, nullable=False),
    sa.PrimaryKeyConstraint('id', 'version')
    )

    op.create_index(op.f('ix_api_keys_history_created_by_id'), 'api_keys_history', ['created_by_id'], unique=False)
    op.create_index(op.f('ix_api_keys_history_service_id'), 'api_keys_history', ['service_id'], unique=False)
    op.add_column('api_keys', sa.Column('created_at', sa.DateTime(), nullable=True))
    op.add_column('api_keys', sa.Column('created_by_id', postgresql.UUID(as_uuid=True), nullable=True))
    op.add_column('api_keys', sa.Column('updated_at', sa.DateTime(), nullable=True))
    op.add_column('api_keys', sa.Column('version', sa.Integer(), nullable=True))

    op.get_bind()
    op.execute('UPDATE api_keys SET created_by_id = (SELECT user_id FROM user_to_service WHERE api_keys.service_id = user_to_service.service_id LIMIT 1)')
    op.execute('UPDATE api_keys SET version = 1, created_at = now()')
    op.execute('INSERT INTO api_keys_history (id, name, secret, service_id, expiry_date, created_at, updated_at, created_by_id, version) SELECT id, name, secret, service_id, expiry_date, created_at, updated_at, created_by_id, version FROM api_keys')

    op.alter_column('api_keys', 'created_at', nullable=False)
    op.alter_column('api_keys', 'created_by_id', nullable=False)
    op.alter_column('api_keys', 'version', nullable=False)

    op.create_index(op.f('ix_api_keys_created_by_id'), 'api_keys', ['created_by_id'], unique=False)
    op.create_foreign_key('fk_api_keys_created_by_id', 'api_keys', 'users', ['created_by_id'], ['id'])
def downgrade(pyramid_env):
    with context.begin_transaction():
        op.drop_table('profile_field')
        op.drop_table('text_field')
        op.drop_table('configurable_field')
        sa.Enum(name='text_field_types').drop(op.get_bind(), checkfirst=False)
        sa.Enum(name='configurable_field_identifiers').drop(op.get_bind(), checkfirst=False)
Example 26
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('match')
    op.drop_table('judge')
    op.drop_table('tournament')
    ENUM(name="match_results").drop(op.get_bind(), checkfirst=False)
    ENUM(name="tournament_statuses").drop(op.get_bind(), checkfirst=False)
Example 27
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('sales_order_line', 'actual_amount')
    op.drop_column('sales_order_line', 'adjust_amount')
    op.drop_column('sales_order_line', 'original_amount')
    preference_table = op.create_table('preference',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('def_so_incoming_type_id', sa.Integer(), nullable=False),
    sa.Column('def_so_incoming_status_id', sa.Integer(), nullable=False),
    sa.Column('def_so_exp_type_id', sa.Integer(), nullable=False),
    sa.Column('def_so_exp_status_id', sa.Integer(), nullable=False),
    sa.Column('def_po_logistic_exp_status_id', sa.Integer(), nullable=False),
    sa.Column('def_po_logistic_exp_type_id', sa.Integer(), nullable=False),
    sa.Column('def_po_goods_exp_status_id', sa.Integer(), nullable=False),
    sa.Column('def_po_goods_exp_type_id', sa.Integer(), nullable=False),
    sa.Column('remark', sa.Text(), nullable=True),
    sa.ForeignKeyConstraint(['def_so_incoming_type_id'], ['enum_values.id'], ),
    sa.ForeignKeyConstraint(['def_so_incoming_status_id'], ['enum_values.id'], ),
    sa.ForeignKeyConstraint(['def_so_exp_type_id'], ['enum_values.id'], ),
    sa.ForeignKeyConstraint(['def_so_exp_status_id'], ['enum_values.id'], ),
    sa.ForeignKeyConstraint(['def_po_logistic_exp_status_id'], ['enum_values.id'], ),
    sa.ForeignKeyConstraint(['def_po_logistic_exp_type_id'], ['enum_values.id'], ),
    sa.ForeignKeyConstraint(['def_po_goods_exp_status_id'], ['enum_values.id'], ),
    sa.ForeignKeyConstraint(['def_po_goods_exp_type_id'], ['enum_values.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.bulk_insert(preference_table, [
        {'id':1, 'def_so_incoming_type_id':12, 'def_so_incoming_status_id':10,
         'def_so_exp_type_id':9, 'def_so_exp_status_id':6,
         'def_po_logistic_exp_status_id': 6, 'def_po_logistic_exp_type_id': 7,
         'def_po_goods_exp_status_id': 6, 'def_po_goods_exp_type_id': 8},
    ],multiinsert=False)
    from sqlalchemy.sql import text
    op.get_bind().execute(text("ALTER SEQUENCE preference_id_seq RESTART WITH 2;"))
def upgrade():
    try:
        with op.batch_alter_table(
            "Criteria", naming_convention=convention, table_args=(UniqueConstraint("name"),)
        ) as batch_op:
            batch_op.drop_constraint("uq_Criteria_name", type_="unique")
    except exc.InternalError:
        with op.batch_alter_table(
            "Criteria", naming_convention=convention, table_args=(UniqueConstraint("name"),)
        ) as batch_op:
            batch_op.drop_constraint("name", type_="unique")
    except ValueError:
        logging.warning("Dropping a unique constraint is not supported by SQLite; skipping uq_Criteria_name!")

    # set existing criteria's active attribute to True using server_default
    op.add_column(
        "CriteriaAndCourses",
        sa.Column("active", sa.Boolean(name="active"), default=True, server_default="1", nullable=False),
    )
    op.add_column(
        "Criteria", sa.Column("public", sa.Boolean(name="public"), default=False, server_default="0", nullable=False)
    )

    # set the first criteria as public
    t = {"name": "Which is better?", "public": True}
    op.get_bind().execute(text("Update Criteria set public=:public where name=:name"), **t)
def upgrade():
    template_history_insert = """INSERT INTO templates_history (id, name, template_type, created_at,
                                                                content, archived, service_id,
                                                                subject, created_by_id, version)
                                 VALUES ('{}', '{}', '{}', '{}', '{}', False, '{}', '{}', '{}', 1)
                              """
    template_insert = """INSERT INTO templates (id, name, template_type, created_at,
                                                content, archived, service_id, subject, created_by_id, version)
                                 VALUES ('{}', '{}', '{}', '{}', '{}', False, '{}', '{}', '{}', 1)
                              """
    content = """You already have a GOV.UK Notify account with this email address.

Sign in here: ((signin_url))

If you’ve forgotten your password, you can reset it here: ((forgot_password_url))


If you didn’t try to register for a GOV.UK Notify account recently, please let us know here: ((feedback_url))"""

    op.get_bind()
    op.execute(template_history_insert.format('0880fbb1-a0c6-46f0-9a8e-36c986381ceb',
                                              'Your GOV.UK Notify account', 'email',
                                              datetime.utcnow(), content, service_id,
                                              'Your GOV.UK Notify account', user_id))
    op.execute(
        template_insert.format('0880fbb1-a0c6-46f0-9a8e-36c986381ceb', 'Your GOV.UK Notify account', 'email',
                               datetime.utcnow(), content, service_id,
                               'Your GOV.UK Notify account', user_id))
Example 30
def downgrade():
    # insert selfevaltype_id column into Questions table
    op.add_column(u'Questions', sa.Column('selfevaltype_id', sa.Integer(), nullable=True))

    # populate the column - only for the "No Comparison with Another Answer"
    # self evaluation type
    selfevaltype_query = text(
        "SELECT id FROM SelfEvalTypes " +
        "WHERE name='No Comparison with Another Answer'"
    )
    conn = op.get_bind()
    res = conn.execute(selfevaltype_query)
    selfevaltype = res.fetchall()

    populate = text(
        "UPDATE Questions " +
        "SET selfevaltype_id = "
        "(SELECT qs.selfevaltypes_id " +
        "FROM QuestionsAndSelfEvalTypes qs "
        "WHERE Questions.id = qs.questions_id " +
        "AND qs.selfevaltypes_id = " + str(selfevaltype[0][0]) + ')'
    )
    op.get_bind().execute(populate)

    with op.batch_alter_table('Questions', naming_convention=convention) as batch_op:
        batch_op.create_foreign_key('fk_Questions_selfevaltype_id_SelfEvalTypes', 'SelfEvalTypes',
                                    ['selfevaltype_id'], ['id'], ondelete="CASCADE")

    with op.batch_alter_table('PostsForJudgements', naming_convention=convention) as batch_op:
        batch_op.drop_column('selfeval')

    op.drop_table('QuestionsAndSelfEvalTypes')
Example 31
def downgrade():
    op.alter_column('rel_type',
                    'template',
                    new_column_name='forward_template',
                    schema='bookbrainz')

    op.add_column('entity_revision',
                  sa.Column('entity_tree_id',
                            sa.INTEGER(),
                            autoincrement=False,
                            nullable=False),
                  schema='bookbrainz')
    op.drop_constraint(u'entity_revision_entity_data_id_fkey',
                       'entity_revision',
                       schema='bookbrainz',
                       type_='foreignkey')
    op.drop_column('entity_revision', 'entity_data_id', schema='bookbrainz')
    op.drop_constraint(u'entity_data_default_alias_id_fkey',
                       'entity_data',
                       schema='bookbrainz',
                       type_='foreignkey')
    op.drop_constraint(u'entity_data_disambiguation_id_fkey',
                       'entity_data',
                       schema='bookbrainz',
                       type_='foreignkey')
    op.drop_constraint(u'entity_data_annotation_id_fkey',
                       'entity_data',
                       schema='bookbrainz',
                       type_='foreignkey')
    op.drop_column('entity_data', 'disambiguation_id', schema='bookbrainz')
    op.drop_column('entity_data', 'default_alias_id', schema='bookbrainz')
    op.drop_column('entity_data', 'annotation_id', schema='bookbrainz')
    op.drop_column('entity', '_type', schema='bookbrainz')

    op.create_table(
        'entity_tree',
        sa.Column('entity_tree_id', sa.INTEGER(), nullable=False),
        sa.Column('annotation_id',
                  sa.INTEGER(),
                  autoincrement=False,
                  nullable=True),
        sa.Column('disambiguation_id',
                  sa.INTEGER(),
                  autoincrement=False,
                  nullable=True),
        sa.Column('data_id', sa.INTEGER(), autoincrement=False,
                  nullable=False),
        sa.Column('default_alias_id',
                  sa.INTEGER(),
                  autoincrement=False,
                  nullable=True),
        sa.ForeignKeyConstraint(['annotation_id'],
                                [u'bookbrainz.annotation.annotation_id'],
                                name=u'entity_tree_annotation_id_fkey'),
        sa.ForeignKeyConstraint(['data_id'],
                                [u'bookbrainz.entity_data.entity_data_id'],
                                name=u'entity_tree_data_id_fkey'),
        sa.ForeignKeyConstraint(['default_alias_id'],
                                [u'bookbrainz.alias.alias_id'],
                                name=u'entity_tree_default_alias_id_fkey'),
        sa.ForeignKeyConstraint(
            ['disambiguation_id'],
            [u'bookbrainz.disambiguation.disambiguation_id'],
            name=u'entity_tree_disambiguation_id_fkey'),
        sa.PrimaryKeyConstraint('entity_tree_id', name=u'entity_tree_pkey'),
        schema='bookbrainz',
        postgresql_ignore_search_path=False)
    op.create_foreign_key(u'entity_revision_entity_tree_id_fkey',
                          'entity_revision',
                          'entity_tree', ['entity_tree_id'],
                          ['entity_tree_id'],
                          source_schema='bookbrainz',
                          referent_schema='bookbrainz')

    op.create_table(
        'entity_tree_alias',
        sa.Column('entity_tree_id',
                  sa.INTEGER(),
                  autoincrement=False,
                  nullable=False),
        sa.Column('alias_id',
                  sa.INTEGER(),
                  autoincrement=False,
                  nullable=False),
        sa.ForeignKeyConstraint(['alias_id'], [u'bookbrainz.alias.alias_id'],
                                name=u'entity_tree_alias_alias_id_fkey'),
        sa.ForeignKeyConstraint(['entity_tree_id'],
                                [u'bookbrainz.entity_tree.entity_tree_id'],
                                name=u'entity_tree_alias_entity_tree_id_fkey'),
        sa.PrimaryKeyConstraint('entity_tree_id',
                                'alias_id',
                                name=u'entity_tree_alias_pkey'),
        schema='bookbrainz')
    op.drop_table('entity_data__alias', schema='bookbrainz')

    op.rename_table('work_data__language',
                    'work_data_language',
                    schema='bookbrainz')
    entity_types.drop(op.get_bind())
Example 32
def downgrade():
    conn = op.get_bind()
    conn.execute(text("set @@lock_wait_timeout = 20;"))
    conn.execute(
        text("ALTER TABLE genericaccount DROP COLUMN folder_separator"))
    conn.execute(text("ALTER TABLE genericaccount DROP COLUMN folder_prefix"))
def upgrade():
    clear_diffusion_level = context.get_x_argument(
        as_dictionary=True).get('clear-diffusion-level')
    if clear_diffusion_level is not None:
        clear_diffusion_level = bool(strtobool(clear_diffusion_level))
    else:
        clear_diffusion_level = True
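    # The x-argument comes from Alembic's command line, e.g.:
    #     alembic -x clear-diffusion-level=false upgrade head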

    op.execute("""
        DROP TRIGGER tri_insert_calculate_sensitivity ON gn_synthese.synthese
    """)
    op.execute("""
        DROP TRIGGER tri_update_calculate_sensitivity ON gn_synthese.synthese
    """)
    op.execute("""
        DROP FUNCTION gn_synthese.fct_tri_cal_sensi_diff_level_on_each_statement
    """)
    op.execute("""
        DROP FUNCTION gn_synthese.fct_tri_cal_sensi_diff_level_on_each_row
    """)
    op.execute("""
        CREATE FUNCTION gn_synthese.fct_tri_calculate_sensitivity_on_each_statement()
         RETURNS trigger
         LANGUAGE plpgsql
        AS $function$ 
            -- Calculate sensitivity on insert in synthese
            BEGIN
            WITH cte AS (
              SELECT 
                id_synthese,
                gn_sensitivity.get_id_nomenclature_sensitivity(
                  new_row.date_min::date, 
                  taxonomie.find_cdref(new_row.cd_nom), 
                  new_row.the_geom_local,
                  jsonb_build_object(
                    'STATUT_BIO', new_row.id_nomenclature_bio_status,
                    'OCC_COMPORTEMENT', new_row.id_nomenclature_behaviour
                  )
                ) AS id_nomenclature_sensitivity
              FROM
                NEW AS new_row
            )
            UPDATE
              gn_synthese.synthese AS s
            SET 
              id_nomenclature_sensitivity = c.id_nomenclature_sensitivity
            FROM
              cte AS c
            WHERE
              c.id_synthese = s.id_synthese
            ;
            RETURN NULL;
            END;
          $function$
        ;
    """)
    op.execute("""
        CREATE FUNCTION gn_synthese.fct_tri_update_sensitivity_on_each_row()
         RETURNS trigger
         LANGUAGE plpgsql
        AS $function$ 
            -- Calculate sensitivity on update in synthese
            BEGIN
            NEW.id_nomenclature_sensitivity = gn_sensitivity.get_id_nomenclature_sensitivity(
                NEW.date_min::date, 
                taxonomie.find_cdref(NEW.cd_nom), 
                NEW.the_geom_local,
                jsonb_build_object(
                  'STATUT_BIO', NEW.id_nomenclature_bio_status,
                  'OCC_COMPORTEMENT', NEW.id_nomenclature_behaviour
                )
            );
            RETURN NEW;
            END;
          $function$
        ;
    """)
    op.execute("""
        CREATE TRIGGER
            tri_insert_calculate_sensitivity
        AFTER
            INSERT
        ON
            gn_synthese.synthese
        REFERENCING
            NEW TABLE AS NEW
        FOR EACH
            STATEMENT
        EXECUTE PROCEDURE
            gn_synthese.fct_tri_calculate_sensitivity_on_each_statement()
    """)
    op.execute("""
        CREATE TRIGGER
            tri_update_calculate_sensitivity
        BEFORE UPDATE OF
            date_min,
            date_max,
            cd_nom,
            the_geom_local,
            id_nomenclature_bio_status,
            id_nomenclature_behaviour
        ON
            gn_synthese.synthese
        FOR EACH
            ROW
        EXECUTE PROCEDURE
            gn_synthese.fct_tri_update_sensitivity_on_each_row()
    """)

    if clear_diffusion_level:
        logger.info("Clearing diffusion level…")
        count = op.get_bind().execute("""
            WITH cleared_rows AS (
                UPDATE
                    gn_synthese.synthese s
                SET
                    id_nomenclature_diffusion_level = NULL
                FROM
                    ref_nomenclatures.t_nomenclatures nomenc_sensitivity,
                    ref_nomenclatures.t_nomenclatures nomenc_diff_level
                WHERE
                    nomenc_sensitivity.id_nomenclature = s.id_nomenclature_sensitivity
                    AND nomenc_diff_level.id_nomenclature = s.id_nomenclature_diffusion_level
                AND nomenc_diff_level.cd_nomenclature = gn_sensitivity.calculate_cd_diffusion_level(NULL, nomenc_sensitivity.cd_nomenclature)
                RETURNING s.id_synthese
            )
            SELECT
                count(*)
            FROM
                cleared_rows;
        """).scalar()
        logger.info("Cleared diffusion level on {} rows.".format(count))
def downgrade():
    restore_diffusion_level = context.get_x_argument(
        as_dictionary=True).get('restore-diffusion-level')
    if restore_diffusion_level is not None:
        restore_diffusion_level = bool(strtobool(restore_diffusion_level))
    else:
        restore_diffusion_level = True

    if restore_diffusion_level:
        logger.info("Restore diffusion level…")
        count = op.get_bind().execute("""
            WITH restored_rows AS (
                UPDATE 
                    gn_synthese.synthese s
                SET
                    id_nomenclature_diffusion_level = ref_nomenclatures.get_id_nomenclature(
                        'NIV_PRECIS',
                        gn_sensitivity.calculate_cd_diffusion_level(
                            NULL,
                            nomenc_sensitivity.cd_nomenclature
                        )
                    )
                FROM
                    ref_nomenclatures.t_nomenclatures nomenc_sensitivity
                WHERE
                    nomenc_sensitivity.id_nomenclature = s.id_nomenclature_sensitivity
                    AND s.id_nomenclature_diffusion_level IS NULL
                RETURNING s.id_synthese
            )
            SELECT
                count(*)
            FROM
                restored_rows
        """).scalar()
        logger.info("Restored diffusion level on {} rows.".format(count))

    op.execute("""
        DROP TRIGGER tri_insert_calculate_sensitivity ON gn_synthese.synthese
    """)
    op.execute("""
        DROP TRIGGER tri_update_calculate_sensitivity ON gn_synthese.synthese
    """)
    op.execute("""
        DROP FUNCTION gn_synthese.fct_tri_calculate_sensitivity_on_each_statement
    """)
    op.execute("""
        DROP FUNCTION gn_synthese.fct_tri_update_sensitivity_on_each_row
    """)
    op.execute("""
        CREATE FUNCTION gn_synthese.fct_tri_cal_sensi_diff_level_on_each_statement()
         RETURNS trigger
         LANGUAGE plpgsql
        AS $function$
          -- Calculate sensitivity and diffusion level on insert in synthese
            BEGIN
            WITH cte AS (
                SELECT
                gn_sensitivity.get_id_nomenclature_sensitivity(
                  updated_rows.date_min::date,
                  taxonomie.find_cdref(updated_rows.cd_nom),
                  updated_rows.the_geom_local,
                  ('{"STATUT_BIO": ' || updated_rows.id_nomenclature_bio_status::text || '}')::jsonb
                ) AS id_nomenclature_sensitivity,
                id_synthese,
                t_diff.cd_nomenclature as cd_nomenclature_diffusion_level
              FROM NEW AS updated_rows
              LEFT JOIN ref_nomenclatures.t_nomenclatures t_diff ON t_diff.id_nomenclature = updated_rows.id_nomenclature_diffusion_level
              WHERE updated_rows.id_nomenclature_sensitivity IS NULL
            )
            UPDATE gn_synthese.synthese AS s
            SET
              id_nomenclature_sensitivity = c.id_nomenclature_sensitivity,
              id_nomenclature_diffusion_level = ref_nomenclatures.get_id_nomenclature(
                'NIV_PRECIS',
                gn_sensitivity.calculate_cd_diffusion_level(
                  c.cd_nomenclature_diffusion_level,
                  t_sensi.cd_nomenclature
                )

              )
            FROM cte AS c
            LEFT JOIN ref_nomenclatures.t_nomenclatures t_sensi ON t_sensi.id_nomenclature = c.id_nomenclature_sensitivity
            WHERE c.id_synthese = s.id_synthese
          ;
            RETURN NULL;
            END;
          $function$
        ;
    """)
    op.execute("""
        CREATE TRIGGER tri_insert_calculate_sensitivity AFTER
        INSERT
            ON
            gn_synthese.synthese REFERENCING NEW TABLE AS NEW FOR EACH STATEMENT EXECUTE PROCEDURE gn_synthese.fct_tri_cal_sensi_diff_level_on_each_statement()
    """)
    op.execute("""
        CREATE FUNCTION gn_synthese.fct_tri_cal_sensi_diff_level_on_each_row()
         RETURNS trigger
         LANGUAGE plpgsql
        AS $function$ 
          -- Calculate sensitivity and diffusion level on update in synthese
          DECLARE calculated_id_sensi integer;
            BEGIN
                SELECT 
                gn_sensitivity.get_id_nomenclature_sensitivity(
                  NEW.date_min::date, 
                  taxonomie.find_cdref(NEW.cd_nom), 
                  NEW.the_geom_local,
                  ('{"STATUT_BIO": ' || NEW.id_nomenclature_bio_status::text || '}')::jsonb
                ) INTO calculated_id_sensi;
              UPDATE gn_synthese.synthese 
              SET 
              id_nomenclature_sensitivity = calculated_id_sensi,
              -- Do not update the diffusion level if it already has a value
              id_nomenclature_diffusion_level = CASE WHEN OLD.id_nomenclature_diffusion_level IS NULL THEN (
                SELECT ref_nomenclatures.get_id_nomenclature(
                    'NIV_PRECIS',
                    gn_sensitivity.calculate_cd_diffusion_level(
                      ref_nomenclatures.get_cd_nomenclature(OLD.id_nomenclature_diffusion_level),
                      ref_nomenclatures.get_cd_nomenclature(calculated_id_sensi)
                  )
                )
              )
              ELSE OLD.id_nomenclature_diffusion_level
              END
              WHERE id_synthese = OLD.id_synthese
              ;
              RETURN NULL;
            END;
          $function$
        ;
    """)
    op.execute("""
        CREATE TRIGGER tri_update_calculate_sensitivity AFTER
        UPDATE
            OF date_min,
            date_max,
            cd_nom,
            the_geom_local,
            id_nomenclature_bio_status ON
            gn_synthese.synthese FOR EACH ROW EXECUTE PROCEDURE gn_synthese.fct_tri_cal_sensi_diff_level_on_each_row()
    """)
def change_pk_constraint(table_name, columns):
    inspector = reflection.Inspector.from_engine(op.get_bind())
    # get_pk_constraint() returns a dict; drop_constraint() needs the name.
    pk_constraint = inspector.get_pk_constraint(table_name)
    op.drop_constraint(pk_constraint['name'], table_name, type_='primary')
    op.drop_column(table_name, 'listener_id')
    op.create_primary_key(None, table_name, columns)
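
Hypothetical usage, rebuilding the primary key after dropping listener_id (the table and column names are invented for illustration):

change_pk_constraint('listener_statistics', ['amphora_id'])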
Example 36
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    conn = op.get_bind()
    op.add_column(
        'chart',
        sa.Column('search_vector',
                  sqlalchemy_utils.types.ts_vector.TSVectorType()))

    sync_trigger(conn, 'chart', 'search_vector',
                 ['title', 'x_axis_title', 'y_axis_title', 'description'])
    conn.execute(
        text("""
                DROP TYPE IF EXISTS tsq_state CASCADE;

                CREATE TYPE tsq_state AS (
                    search_query text,
                    parentheses_stack int,
                    skip_for int,
                    current_token text,
                    current_index int,
                    current_char text,
                    previous_char text,
                    tokens text[]
                );

                CREATE OR REPLACE FUNCTION tsq_append_current_token(state tsq_state)
                RETURNS tsq_state AS $$
                BEGIN
                    IF state.current_token != '' THEN
                        state.tokens := array_append(state.tokens, state.current_token);
                        state.current_token := '';
                    END IF;
                    RETURN state;
                END;
                $$ LANGUAGE plpgsql IMMUTABLE;


                CREATE OR REPLACE FUNCTION tsq_tokenize_character(state tsq_state)
                RETURNS tsq_state AS $$
                BEGIN
                    IF state.current_char = '(' THEN
                        state.tokens := array_append(state.tokens, '(');
                        state.parentheses_stack := state.parentheses_stack + 1;
                        state := tsq_append_current_token(state);
                    ELSIF state.current_char = ')' THEN
                        IF (state.parentheses_stack > 0 AND state.current_token != '') THEN
                            state := tsq_append_current_token(state);
                            state.tokens := array_append(state.tokens, ')');
                            state.parentheses_stack := state.parentheses_stack - 1;
                        END IF;
                    ELSIF state.current_char = '"' THEN
                        state.skip_for := position('"' IN substring(
                            state.search_query FROM state.current_index + 1
                        ));

                        IF state.skip_for > 1 THEN
                            state.tokens = array_append(
                                state.tokens,
                                substring(
                                    state.search_query
                                    FROM state.current_index FOR state.skip_for + 1
                                )
                            );
                        ELSIF state.skip_for = 0 THEN
                            state.current_token := state.current_token || state.current_char;
                        END IF;
                    ELSIF (
                        state.current_char = '-' AND
                        (state.current_index = 1 OR state.previous_char = ' ')
                    ) THEN
                        state.tokens := array_append(state.tokens, '-');
                    ELSIF state.current_char = ' ' THEN
                        state := tsq_append_current_token(state);
                        IF substring(
                            state.search_query FROM state.current_index FOR 4
                        ) = ' or ' THEN
                            state.skip_for := 2;

                            -- remove duplicate OR tokens
                            IF state.tokens[array_length(state.tokens, 1)] != ' | ' THEN
                                state.tokens := array_append(state.tokens, ' | ');
                            END IF;
                        END IF;
                    ELSE
                        state.current_token = state.current_token || state.current_char;
                    END IF;
                    RETURN state;
                END;
                $$ LANGUAGE plpgsql IMMUTABLE;


                CREATE OR REPLACE FUNCTION tsq_tokenize(search_query text) RETURNS text[] AS $$
                DECLARE
                    state tsq_state;
                BEGIN
                    SELECT
                        search_query::text AS search_query,
                        0::int AS parentheses_stack,
                        0 AS skip_for,
                        ''::text AS current_token,
                        0 AS current_index,
                        ''::text AS current_char,
                        ''::text AS previous_char,
                        '{}'::text[] AS tokens
                    INTO state;

                    state.search_query := lower(trim(
                        regexp_replace(search_query, '""+', '""', 'g')
                    ));

                    FOR state.current_index IN (
                        SELECT generate_series(1, length(state.search_query))
                    ) LOOP
                        state.current_char := substring(
                            search_query FROM state.current_index FOR 1
                        );

                        IF state.skip_for > 0 THEN
                            state.skip_for := state.skip_for - 1;
                            CONTINUE;
                        END IF;

                        state := tsq_tokenize_character(state);
                        state.previous_char := state.current_char;
                    END LOOP;
                    state := tsq_append_current_token(state);

                    state.tokens := array_nremove(state.tokens, '(', -state.parentheses_stack);

                    RETURN state.tokens;
                END;
                $$ LANGUAGE plpgsql IMMUTABLE;


                -- Processes an array of text search tokens and returns a tsquery
                CREATE OR REPLACE FUNCTION tsq_process_tokens(config regconfig, tokens text[])
                RETURNS tsquery AS $$
                DECLARE
                    result_query text;
                    previous_value text;
                    value text;
                BEGIN
                    result_query := '';
                    FOREACH value IN ARRAY tokens LOOP
                        IF value = '"' THEN
                            CONTINUE;
                        END IF;

                        IF left(value, 1) = '"' AND right(value, 1) = '"' THEN
                            value := phraseto_tsquery(config, value);
                        ELSIF value NOT IN ('(', ' | ', ')', '-') THEN
                            value := quote_literal(value) || ':*';
                        END IF;

                        IF previous_value = '-' THEN
                            IF value = '(' THEN
                                value := '!' || value;
                            ELSE
                                value := '!(' || value || ')';
                            END IF;
                        END IF;

                        SELECT
                            CASE
                                WHEN result_query = '' THEN value
                                WHEN (
                                    previous_value IN ('!(', '(', ' | ') OR
                                    value IN (')', ' | ')
                                ) THEN result_query || value
                                ELSE result_query || ' & ' || value
                            END
                        INTO result_query;
                        previous_value := value;
                    END LOOP;

                    RETURN to_tsquery(config, result_query);
                END;
                $$ LANGUAGE plpgsql IMMUTABLE;


                CREATE OR REPLACE FUNCTION tsq_process_tokens(tokens text[])
                RETURNS tsquery AS $$
                    SELECT tsq_process_tokens(get_current_ts_config(), tokens);
                $$ LANGUAGE SQL IMMUTABLE;


                CREATE OR REPLACE FUNCTION tsq_parse(config regconfig, search_query text)
                RETURNS tsquery AS $$
                    SELECT tsq_process_tokens(config, tsq_tokenize(search_query));
                $$ LANGUAGE SQL IMMUTABLE;


                CREATE OR REPLACE FUNCTION tsq_parse(config text, search_query text)
                RETURNS tsquery AS $$
                    SELECT tsq_parse(config::regconfig, search_query);
                $$ LANGUAGE SQL IMMUTABLE;


                CREATE OR REPLACE FUNCTION tsq_parse(search_query text) RETURNS tsquery AS $$
                    SELECT tsq_parse(get_current_ts_config(), search_query);
                $$ LANGUAGE SQL IMMUTABLE;


                -- remove first N elements equal to the given value from the array (array
                -- must be one-dimensional)
                --
                -- If negative value is given as the third argument the removal of elements
                -- starts from the last array element.
                CREATE OR REPLACE FUNCTION array_nremove(anyarray, anyelement, int)
                RETURNS ANYARRAY AS $$
                    WITH replaced_positions AS (
                        SELECT UNNEST(
                            CASE
                            WHEN $2 IS NULL THEN
                                '{}'::int[]
                            WHEN $3 > 0 THEN
                                (array_positions($1, $2))[1:$3]
                            WHEN $3 < 0 THEN
                                (array_positions($1, $2))[
                                    (cardinality(array_positions($1, $2)) + $3 + 1):
                                ]
                            ELSE
                                '{}'::int[]
                            END
                        ) AS position
                    )
                    SELECT COALESCE((
                        SELECT array_agg(value)
                        FROM unnest($1) WITH ORDINALITY AS t(value, index)
                        WHERE index NOT IN (SELECT position FROM replaced_positions)
                    ), $1[1:0]);
                $$ LANGUAGE SQL IMMUTABLE;

                """))
Example 37
def upgrade():
    entity_types.create(op.get_bind())
    op.rename_table('work_data_language',
                    'work_data__language',
                    schema='bookbrainz')
    op.add_column('entity',
                  sa.Column('_type', entity_types, nullable=False),
                  schema='bookbrainz')

    # EntityTree -> Entity
    op.create_table('entity_data__alias',
                    sa.Column('entity_data_id', sa.Integer(), nullable=False),
                    sa.Column('alias_id', sa.Integer(), nullable=False),
                    sa.ForeignKeyConstraint(
                        ['alias_id'],
                        ['bookbrainz.alias.alias_id'],
                    ),
                    sa.ForeignKeyConstraint(
                        ['entity_data_id'],
                        ['bookbrainz.entity_data.entity_data_id'],
                    ),
                    sa.PrimaryKeyConstraint('entity_data_id', 'alias_id'),
                    schema='bookbrainz')
    op.drop_table('entity_tree_alias', schema='bookbrainz')

    op.drop_constraint(u'entity_revision_entity_tree_id_fkey',
                       'entity_revision',
                       schema='bookbrainz',
                       type_='foreignkey')
    op.drop_table('entity_tree', schema='bookbrainz')

    op.add_column('entity_data',
                  sa.Column('annotation_id', sa.Integer(), nullable=True),
                  schema='bookbrainz')
    op.add_column('entity_data',
                  sa.Column('default_alias_id', sa.Integer(), nullable=True),
                  schema='bookbrainz')
    op.add_column('entity_data',
                  sa.Column('disambiguation_id', sa.Integer(), nullable=True),
                  schema='bookbrainz')
    op.create_foreign_key(u'entity_data_annotation_id_fkey',
                          'entity_data',
                          'annotation', ['annotation_id'], ['annotation_id'],
                          source_schema='bookbrainz',
                          referent_schema='bookbrainz')
    op.create_foreign_key(u'entity_data_disambiguation_id_fkey',
                          'entity_data',
                          'disambiguation', ['disambiguation_id'],
                          ['disambiguation_id'],
                          source_schema='bookbrainz',
                          referent_schema='bookbrainz')
    op.create_foreign_key(u'entity_data_default_alias_id_fkey',
                          'entity_data',
                          'alias', ['default_alias_id'], ['alias_id'],
                          source_schema='bookbrainz',
                          referent_schema='bookbrainz')

    op.add_column('entity_revision',
                  sa.Column('entity_data_id', sa.Integer(), nullable=False),
                  schema='bookbrainz')
    op.create_foreign_key(u'entity_revision_entity_data_id_fkey',
                          'entity_revision',
                          'entity_data', ['entity_data_id'],
                          ['entity_data_id'],
                          source_schema='bookbrainz',
                          referent_schema='bookbrainz')
    op.drop_column('entity_revision', 'entity_tree_id', schema='bookbrainz')
    op.alter_column('rel_type',
                    'forward_template',
                    new_column_name='template',
                    schema='bookbrainz')
Example 38
def upgrade_data():
    con = op.get_bind()
    sql = f"update {get_inv()}.device set active='t';"
    con.execute(sql)
def downgrade():
    op.drop_table('user_subscriptions')
    template_enum.drop(op.get_bind(), checkfirst=False)
    type_enum.drop(op.get_bind(), checkfirst=False)
def upgrade():
    # Create tables for the new models.
    op.create_table(
        "sl_columns",
        # AuditMixinNullable
        sa.Column("created_on", sa.DateTime(), nullable=True),
        sa.Column("changed_on", sa.DateTime(), nullable=True),
        sa.Column("created_by_fk", sa.Integer(), nullable=True),
        sa.Column("changed_by_fk", sa.Integer(), nullable=True),
        # ExtraJSONMixin
        sa.Column("extra_json", sa.Text(), nullable=True),
        # ImportExportMixin
        sa.Column("uuid", UUIDType(binary=True), primary_key=False, default=uuid4),
        # Column
        sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
        sa.Column("name", sa.TEXT(), nullable=False),
        sa.Column("type", sa.TEXT(), nullable=False),
        sa.Column("expression", sa.TEXT(), nullable=False),
        sa.Column("is_physical", sa.BOOLEAN(), nullable=False, default=True,),
        sa.Column("description", sa.TEXT(), nullable=True),
        sa.Column("warning_text", sa.TEXT(), nullable=True),
        sa.Column("unit", sa.TEXT(), nullable=True),
        sa.Column("is_temporal", sa.BOOLEAN(), nullable=False),
        sa.Column("is_spatial", sa.BOOLEAN(), nullable=False, default=False,),
        sa.Column("is_partition", sa.BOOLEAN(), nullable=False, default=False,),
        sa.Column("is_aggregation", sa.BOOLEAN(), nullable=False, default=False,),
        sa.Column("is_additive", sa.BOOLEAN(), nullable=False, default=False,),
        sa.Column("is_increase_desired", sa.BOOLEAN(), nullable=False, default=True,),
        sa.Column(
            "is_managed_externally",
            sa.Boolean(),
            nullable=False,
            server_default=sa.false(),
        ),
        sa.Column("external_url", sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    with op.batch_alter_table("sl_columns") as batch_op:
        batch_op.create_unique_constraint("uq_sl_columns_uuid", ["uuid"])

    op.create_table(
        "sl_tables",
        # AuditMixinNullable
        sa.Column("created_on", sa.DateTime(), nullable=True),
        sa.Column("changed_on", sa.DateTime(), nullable=True),
        sa.Column("created_by_fk", sa.Integer(), nullable=True),
        sa.Column("changed_by_fk", sa.Integer(), nullable=True),
        # ExtraJSONMixin
        sa.Column("extra_json", sa.Text(), nullable=True),
        # ImportExportMixin
        sa.Column("uuid", UUIDType(binary=True), primary_key=False, default=uuid4),
        # Table
        sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
        sa.Column("database_id", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column("catalog", sa.TEXT(), nullable=True),
        sa.Column("schema", sa.TEXT(), nullable=True),
        sa.Column("name", sa.TEXT(), nullable=False),
        sa.Column(
            "is_managed_externally",
            sa.Boolean(),
            nullable=False,
            server_default=sa.false(),
        ),
        sa.Column("external_url", sa.Text(), nullable=True),
        sa.ForeignKeyConstraint(["database_id"], ["dbs.id"], name="sl_tables_ibfk_1"),
        sa.PrimaryKeyConstraint("id"),
    )
    with op.batch_alter_table("sl_tables") as batch_op:
        batch_op.create_unique_constraint("uq_sl_tables_uuid", ["uuid"])

    op.create_table(
        "sl_table_columns",
        sa.Column("table_id", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column("column_id", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.ForeignKeyConstraint(
            ["column_id"], ["sl_columns.id"], name="sl_table_columns_ibfk_2"
        ),
        sa.ForeignKeyConstraint(
            ["table_id"], ["sl_tables.id"], name="sl_table_columns_ibfk_1"
        ),
    )

    op.create_table(
        "sl_datasets",
        # AuditMixinNullable
        sa.Column("created_on", sa.DateTime(), nullable=True),
        sa.Column("changed_on", sa.DateTime(), nullable=True),
        sa.Column("created_by_fk", sa.Integer(), nullable=True),
        sa.Column("changed_by_fk", sa.Integer(), nullable=True),
        # ExtraJSONMixin
        sa.Column("extra_json", sa.Text(), nullable=True),
        # ImportExportMixin
        sa.Column("uuid", UUIDType(binary=True), primary_key=False, default=uuid4),
        # Dataset
        sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
        sa.Column("sqlatable_id", sa.INTEGER(), nullable=True),
        sa.Column("name", sa.TEXT(), nullable=False),
        sa.Column("expression", sa.TEXT(), nullable=False),
        sa.Column("is_physical", sa.BOOLEAN(), nullable=False, default=False,),
        sa.Column(
            "is_managed_externally",
            sa.Boolean(),
            nullable=False,
            server_default=sa.false(),
        ),
        sa.Column("external_url", sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    with op.batch_alter_table("sl_datasets") as batch_op:
        batch_op.create_unique_constraint("uq_sl_datasets_uuid", ["uuid"])
        batch_op.create_unique_constraint(
            "uq_sl_datasets_sqlatable_id", ["sqlatable_id"]
        )

    op.create_table(
        "sl_dataset_columns",
        sa.Column("dataset_id", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column("column_id", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.ForeignKeyConstraint(
            ["column_id"], ["sl_columns.id"], name="sl_dataset_columns_ibfk_2"
        ),
        sa.ForeignKeyConstraint(
            ["dataset_id"], ["sl_datasets.id"], name="sl_dataset_columns_ibfk_1"
        ),
    )

    op.create_table(
        "sl_dataset_tables",
        sa.Column("dataset_id", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column("table_id", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.ForeignKeyConstraint(
            ["dataset_id"], ["sl_datasets.id"], name="sl_dataset_tables_ibfk_1"
        ),
        sa.ForeignKeyConstraint(
            ["table_id"], ["sl_tables.id"], name="sl_dataset_tables_ibfk_2"
        ),
    )

    # migrate existing datasets to the new models
    bind = op.get_bind()
    session = db.Session(bind=bind)  # pylint: disable=no-member

    datasets = session.query(SqlaTable).all()
    for dataset in datasets:
        dataset.fetch_columns_and_metrics(session)
        after_insert(target=dataset)
def upgrade():
    conn = op.get_bind()
    conn.execute("""UPDATE alturas SET codprov=substring(codpostal, 1, 1)""")
    conn.execute(
        """UPDATE alturas SET cp=substring(codpostal, 2, 4)::integer""")
Example 42
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    connection = op.get_bind()
    connection.execute(keys_table.update().where(
        keys_table.c.type == 'regex').values(data=None))
Example 43
def _perform(upgrade):
    conn = op.get_bind()

    sample_list = conn.execute("SELECT id, file_size, file_type, md5, crc32, "
                               "sha1, sha256, sha512, ssdeep FROM samples")

    samples = []
    for sample in sample_list:
        samples.append({
            "id": sample[0],
            "file_size": sample[1],
            "file_type": sample[2],
            "md5": sample[3],
            "crc32": sample[4],
            "sha1": sample[5],
            "sha256": sample[6],
            "sha512": sample[7],
            "ssdeep": sample[8],
        })

    # PostgreSQL and MySQL have different names for the foreign key of
    # Task.sample_id -> Sample.id; for SQLite we don't drop/recreate the
    # foreign key.
    fkey_name = {
        "mysql": "tasks_ibfk_1",
        "postgresql": "tasks_sample_id_fkey",
    }

    fkey = fkey_name.get(db.Database(schema_check=False).engine.name)

    # First drop the foreign key.
    if fkey:
        op.drop_constraint(fkey, "tasks", type_="foreignkey")

    # Rename original table.
    op.rename_table("samples", "old_samples")

    # Drop old table.
    op.drop_table("old_samples")

    if upgrade:
        file_type = sa.Text()
    else:
        file_type = sa.String(255)

        # Downgrading trims file_type to 255 characters, so apply the
        # truncation to every record up front.
        for sample in samples:
            sample["file_type"] = sample["file_type"][:255]

    # Create the new table with 1.2 schema.
    # Changelog:
    # * file_type changed its type from String(255) to Text().
    op.create_table("samples", sa.Column("id", sa.Integer(), nullable=False),
                    sa.Column("file_size", sa.Integer(), nullable=False),
                    sa.Column("file_type", file_type, nullable=False),
                    sa.Column("md5", sa.String(32), nullable=False),
                    sa.Column("crc32", sa.String(8), nullable=False),
                    sa.Column("sha1", sa.String(40), nullable=False),
                    sa.Column("sha256", sa.String(64), nullable=False),
                    sa.Column("sha512", sa.String(128), nullable=False),
                    sa.Column("ssdeep", sa.Text(), nullable=True),
                    sa.PrimaryKeyConstraint("id"))

    # Insert data.
    op.bulk_insert(db.Sample.__table__, samples)

    # Restore the indices.
    op.create_index("hash_index",
                    "samples", ["md5", "crc32", "sha1", "sha256", "sha512"],
                    unique=True)

    # Create the foreign key.
    if fkey:
        op.create_foreign_key(fkey, "tasks", "samples", ["sample_id"], ["id"])
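
# A hedged sketch of how the shared body above is typically wired to the
# two migration entry points (the originals are not shown in this excerpt).
def upgrade():
    _perform(upgrade=True)


def downgrade():
    _perform(upgrade=False)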
Example 44
def upgrade():
    # Rename the primary-key index and the id sequence from the old
    # "status_report" names to the new "report" names.
    bind = op.get_bind()
    bind.execute(
        "ALTER INDEX IF EXISTS status_report_pkey RENAME TO report_pkey")

    bind.execute(
        "ALTER SEQUENCE IF EXISTS status_report_id_seq RENAME TO report_id_seq"
    )

    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("document_incident")
    op.drop_constraint("assoc_incident_tags_incident_id_fkey",
                       "assoc_incident_tags",
                       type_="foreignkey")
    op.drop_constraint("assoc_incident_tags_tag_id_fkey",
                       "assoc_incident_tags",
                       type_="foreignkey")
    op.create_foreign_key(None,
                          "assoc_incident_tags",
                          "tag", ["tag_id"], ["id"],
                          ondelete="CASCADE")
    op.create_foreign_key(None,
                          "assoc_incident_tags",
                          "incident", ["incident_id"], ["id"],
                          ondelete="CASCADE")
    op.drop_constraint("assoc_incident_terms_incident_id_fkey",
                       "assoc_incident_terms",
                       type_="foreignkey")
    op.drop_constraint("assoc_incident_terms_term_id_fkey",
                       "assoc_incident_terms",
                       type_="foreignkey")
    op.create_foreign_key(None,
                          "assoc_incident_terms",
                          "term", ["term_id"], ["id"],
                          ondelete="CASCADE")
    op.create_foreign_key(None,
                          "assoc_incident_terms",
                          "incident", ["incident_id"], ["id"],
                          ondelete="CASCADE")
    op.drop_constraint("conference_incident_id_fkey",
                       "conference",
                       type_="foreignkey")
    op.create_foreign_key(None,
                          "conference",
                          "incident", ["incident_id"], ["id"],
                          ondelete="CASCADE")
    op.drop_constraint("conversation_incident_id_fkey",
                       "conversation",
                       type_="foreignkey")
    op.create_foreign_key(None,
                          "conversation",
                          "incident", ["incident_id"], ["id"],
                          ondelete="CASCADE")
    op.drop_constraint("document_incident_id_fkey",
                       "document",
                       type_="foreignkey")
    op.create_foreign_key(None,
                          "document",
                          "incident", ["incident_id"], ["id"],
                          ondelete="CASCADE")
    op.drop_constraint("event_individual_id_fkey", "event", type_="foreignkey")
    op.drop_constraint("event_incident_id_fkey", "event", type_="foreignkey")
    op.create_foreign_key(None,
                          "event",
                          "incident", ["incident_id"], ["id"],
                          ondelete="CASCADE")
    op.create_foreign_key(None,
                          "event",
                          "individual_contact", ["individual_id"], ["id"],
                          ondelete="CASCADE")
    op.drop_constraint("feedback_incident_id_fkey",
                       "feedback",
                       type_="foreignkey")
    op.create_foreign_key(None,
                          "feedback",
                          "incident", ["incident_id"], ["id"],
                          ondelete="CASCADE")
    op.drop_constraint("group_incident_id_fkey", "group", type_="foreignkey")
    op.create_foreign_key(None,
                          "group",
                          "incident", ["incident_id"], ["id"],
                          ondelete="CASCADE")
    op.drop_constraint("participant_incident_id_fkey",
                       "participant",
                       type_="foreignkey")
    op.create_foreign_key(None,
                          "participant",
                          "incident", ["incident_id"], ["id"],
                          ondelete="CASCADE")
    op.drop_constraint("participant_role_participant_id_fkey",
                       "participant_role",
                       type_="foreignkey")
    op.create_foreign_key(None,
                          "participant_role",
                          "participant", ["participant_id"], ["id"],
                          ondelete="CASCADE")
    op.drop_constraint("report_incident_id_fkey", "report", type_="foreignkey")
    op.create_foreign_key(None,
                          "report",
                          "incident", ["incident_id"], ["id"],
                          ondelete="CASCADE")
    op.drop_constraint("storage_incident_id_fkey",
                       "storage",
                       type_="foreignkey")
    op.create_foreign_key(None,
                          "storage",
                          "incident", ["incident_id"], ["id"],
                          ondelete="CASCADE")
    op.drop_constraint("task_incident_id_fkey", "task", type_="foreignkey")
    op.create_foreign_key(None,
                          "task",
                          "incident", ["incident_id"], ["id"],
                          ondelete="CASCADE")
    op.drop_constraint("ticket_incident_id_fkey", "ticket", type_="foreignkey")
    op.create_foreign_key(None,
                          "ticket",
                          "incident", ["incident_id"], ["id"],
                          ondelete="CASCADE")
    op.drop_constraint("workflow_instance_incident_id_fkey",
                       "workflow_instance",
                       type_="foreignkey")
    op.create_foreign_key(None,
                          "workflow_instance",
                          "incident", ["incident_id"], ["id"],
                          ondelete="CASCADE")
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("workspace_subscriptions")
    sa.Enum(name="workspacesubscriptionstate").drop(op.get_bind(), checkfirst=False)
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('problem', 'classification_type')
    classification_type_enum = postgresql.ENUM('binary', 'multi-label', 'multi-class', name='classification_type_enum')
    classification_type_enum.drop(op.get_bind())
Example 47
def postprocess_moves():

    Base = declarative_base()
    Session = sessionmaker(bind=op.get_bind())

    class Move(Base):
        __tablename__ = 'move'
        id = sa.Column(sa.Integer, name="id", primary_key=True)
        activity = sa.Column(sa.String, name='activity')

        location_address = sa.Column(sa.String, name="location_address")
        location_raw = sa.Column(sa.String, name="location_raw")
        gps_center_max_distance = sa.Column(sa.Float,
                                            name="gps_center_max_distance")
        gps_center_latitude = sa.Column(sa.Float, name="gps_center_latitude")
        gps_center_longitude = sa.Column(sa.Float, name="gps_center_longitude")

    class Sample(Base):
        __tablename__ = 'sample'
        id = sa.Column(sa.Integer, name="id", primary_key=True)

        moveId = sa.Column(sa.Integer,
                           sa.ForeignKey(Move.id),
                           name="move_id",
                           nullable=False)
        move = sa.orm.relationship(Move,
                                   backref=sa.orm.backref('samples',
                                                          lazy='dynamic'))

        sample_type = sa.Column(sa.String, name='sample_type')

        longitude = sa.Column(sa.Float, name='longitude')
        latitude = sa.Column(sa.Float, name='latitude')

    session = Session()
    moves_count = session.query(Move).count()
    for idx, move in enumerate(session.query(Move)):

        print(u"processing move %d/%d" % (idx + 1, moves_count))

        gps_samples = [
            sample for sample in move.samples
            if sample.sample_type and sample.sample_type.startswith('gps-')
        ]

        if len(gps_samples) > 0:
            print(u"  got %d GPS samples for move %d: %s" %
                  (len(gps_samples), move.id, move.activity))

            gps_center = calculate_gps_center(gps_samples)
            move.gps_center_latitude = gps_center[0]
            move.gps_center_longitude = gps_center[1]

            gps_center_degrees = [radian_to_degree(x) for x in gps_center]

            gps_center_max_distance = 0
            for sample in gps_samples:
                point = (sample.latitude, sample.longitude)
                point_degrees = [radian_to_degree(x) for x in point]
                distance = vincenty(gps_center_degrees, point_degrees).meters
                gps_center_max_distance = max(gps_center_max_distance,
                                              distance)

            move.gps_center_max_distance = gps_center_max_distance

            first_sample = gps_samples[0]
            latitude = first_sample.latitude
            longitude = first_sample.longitude

            geolocator = Nominatim()
            location = geolocator.reverse(
                "%f, %f" %
                (radian_to_degree(latitude), radian_to_degree(longitude)))
            move.location_address = location.address
            move.location_raw = json.dumps(location.raw)
        else:
            print(u"  got no GPS samples for move %d: %s" %
                  (move.id, move.activity))
def table(name):
    engine = op.get_bind().engine
    metadata = sa.MetaData(engine)
    metadata.reflect(only=[name])
    return metadata.tables[name]
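
# Hypothetical use of the reflection helper above; the table name and the
# 'active' column are assumptions for illustration.
users = table('user')
op.get_bind().execute(users.update().values(active=True))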
Example 49
def upgrade():
    conn = op.get_bind()
    conn.execute(
        "ALTER TABLE admiral_equipment ALTER COLUMN locked TYPE BOOLEAN USING CASE WHEN locked=0 THEN false ELSE true END;"
    )
Example 50

"""
Revision ID: aed8bac5375d
Revises: 
Create Date: 2019-12-10 13:30:45.562666

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = 'aed8bac5375d'
down_revision = None
branch_labels = None
depends_on = None

def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Acquire the bind inside upgrade(); op.get_bind() is not usable at
    # module import time (e.g. when Alembic merely loads the script to
    # build its revision history).
    connection = op.get_bind()
    op.create_table('BookTypes',
    sa.Column('type', sa.String(length=20), nullable=False),
    sa.Column('charge', sa.Float(), nullable=False),
    sa.PrimaryKeyConstraint('type'),
    sa.UniqueConstraint('type')
    )
    connection.execute("""
                    INSERT INTO "BookTypes"("type", "charge")
                	VALUES ('regular','1.5'), ('fiction','3'), ('novel','1.5');    
                    """)
    # ### end Alembic commands ###
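
# A hedged reverse migration for the example above (an assumption; the
# original downgrade is not shown): drop the seeded lookup table.
def downgrade():
    op.drop_table('BookTypes')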
Example 51
def migrate_watch_assoc():
    """
    Migrates watch targets to watch assocs
    :return:
    """
    # Data migration - online mode only
    if context.is_offline_mode():
        logger.warning('Data migration skipped in the offline mode')
        return

    def strip(x):
        if x is None:
            return None
        return x.strip()

    def target_key(t):
        scheme, host, port = t.scan_scheme, t.scan_host, t.scan_port
        if scheme is None:
            scheme = 'https'
        if port is not None:
            port = int(port)
        if port is None:
            port = 443
        if scheme == 'http':
            scheme = 'https'
        if scheme == 'htttp':
            scheme = 'https'
        if port == 80 or port <= 10 or port >= 65535:
            port = 443
        host = strip(host)
        if host is not None:
            if host.startswith('*.'):
                host = host[2:]
            if host.startswith('%.'):
                host = host[2:]
        return scheme, host, port

    target_db = {}
    already_assoc = set()
    duplicates = []

    bind = op.get_bind()
    sess = BaseSession(bind=bind)
    it = sess.query(DbWatchTarget).yield_per(1000)
    for rec in it:
        ck = target_key(rec)
        rec_assoc = rec

        if ck in target_db:
            rec_assoc = target_db[ck]
            duplicates.append(rec.id)
        else:
            target_db[ck] = rec
            rec.scan_scheme = ck[0]
            rec.scan_host = ck[1]
            rec.scan_port = ck[2]

        if rec.user_id is None:
            continue

        cur_assoc_key = rec_assoc.id, rec.user_id
        if cur_assoc_key in already_assoc:
            print('already assoc: %s' % (cur_assoc_key, ))
            continue
        already_assoc.add(cur_assoc_key)

        assoc = DbWatchAssoc()
        assoc.scan_type = 1
        assoc.created_at = rec_assoc.created_at
        assoc.updated_at = rec_assoc.updated_at
        assoc.scan_periodicity = rec_assoc.scan_periodicity
        assoc.user_id = rec.user_id  # actual record!
        assoc.watch_id = rec_assoc.id
        sess.add(assoc)
    sess.commit()

    # remove duplicates
    if len(duplicates) > 0:
        sess.query(DbWatchTarget).filter(DbWatchTarget.id.in_(list(duplicates))) \
            .delete(synchronize_session='fetch')
        sess.commit()
        print('Removed %s duplicates %s' % (len(duplicates), duplicates))
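
# Hedged illustration of the normalisation in target_key() above (the
# helper is local to migrate_watch_assoc, so this sketch assumes a
# module-level copy): wildcard hosts are stripped and http/port 80
# collapse to https/443.
from types import SimpleNamespace
rec = SimpleNamespace(scan_scheme='http', scan_host='*.example.com', scan_port=80)
assert target_key(rec) == ('https', 'example.com', 443)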
Example 52
def downgrade():
    conn = op.get_bind()
    # Inspect the boolean value itself; a constant condition in the USING
    # clause would map every row to the same integer.
    conn.execute(
        "ALTER TABLE admiral_equipment ALTER COLUMN locked TYPE INTEGER USING CASE WHEN locked THEN 1 ELSE 0 END;"
    )
def copy_and_move_data(resources_table_name):
    global variable_association_id_counter

    connection = op.get_bind()

    # Our table shapes - we avoid directly working with SA models
    # since we have to deal with coexistence of old and new schema.
    #
    # Note how pluralization works - 'cells' vs 'cell_variables'.
    # Fortunately all of our resources pluralize by just adding 's'.
    resource_name = resources_table_name[:-1]
    resources = sa.sql.table(
        resources_table_name, sa.sql.column('id', sa.Integer),
        sa.sql.column('variable_association_id', sa.Integer))
    resource_variables = sa.sql.table(
        resource_name + '_variables', sa.sql.column('created_at', sa.DateTime),
        sa.sql.column('updated_at', sa.DateTime),
        sa.sql.column('parent_id', sa.Integer),
        sa.sql.column('key', sa.String),
        sa.sql.column('value', sqlalchemy_utils.types.json.JSONType))
    variables = sa.sql.table(
        'variables', sa.sql.column('created_at', sa.DateTime),
        sa.sql.column('updated_at', sa.DateTime),
        sa.sql.column('association_id', sa.Integer),
        sa.sql.column('key_', sa.String),
        sa.sql.column('value_', sqlalchemy_utils.types.json.JSONType))
    variable_association = sa.sql.table(
        'variable_association', sa.sql.column('created_at', sa.DateTime),
        sa.sql.column('id', sa.Integer),
        sa.sql.column('discriminator', sa.String))

    # A smarter query might be possible on Postgres, but common table
    # expressions (CTEs) only arrived in MySQL 8.0 and cannot be relied
    # on across backends, so let's just keep it really simple for now.
    # At least key/values are copied over with INSERT ... FROM SELECT,
    # which avoids serializing them in and out of the database.

    for resource in connection.execute(resources.select()):
        variable_association_id_counter += 1

        # add variable_association_id value...
        connection.execute(
            resources.update().where(resources.c.id == resource.id).values(
                variable_association_id=sa.literal(
                    variable_association_id_counter)))

        # create specific association - there is an additional level
        # of indirection, hence "polymorphic association"
        connection.execute(variable_association.insert().values(
            created_at=timeutils.utcnow(),
            id=variable_association_id_counter,
            discriminator=resource_name))

        # copy over into 'variables'
        connection.execute(variables.insert().from_select(
            variables.c.keys(),
            sa.select([
                resource_variables.c.created_at,
                resource_variables.c.updated_at,
                # only insert variables associated with this resource
                sa.literal(variable_association_id_counter),
                resource_variables.c.key,
                resource_variables.c.value
            ]).where(resource_variables.c.parent_id == resource.id)))

    op.drop_table(resource_name + '_variables')
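
# Hypothetical driver for the helper above; the actual list of resource
# tables handled by this migration is not shown in the excerpt.
def upgrade():
    for resources_table_name in ('cells', 'hosts', 'regions'):
        copy_and_move_data(resources_table_name)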
Example 54
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql

# revision identifiers, used by Alembic.
from sqlalchemy.orm import Session, relationship
from sqlalchemy.ext.declarative import declarative_base
from tqdm import tqdm

revision = 'fddd82f918b7'
down_revision = 'b7b04e64002a'
branch_labels = None
depends_on = None

Base = declarative_base()
db = sa


def upgrade():
    # Acquire the bind and session lazily; op.get_bind() is not usable at
    # module import time.
    bind = op.get_bind()
    session = Session(bind=bind)
    op.add_column('tallySheet', sa.Column('areaId',
                                          sa.Integer(),
                                          nullable=True))
    op.add_column('tallySheet',
                  sa.Column('electionId', sa.Integer(), nullable=True))
    op.add_column('tallySheet',
                  sa.Column('latestVersionId', sa.Integer(), nullable=True))
    op.add_column('tallySheetVersion',
                  sa.Column('tallySheetId', sa.Integer(), nullable=True))
Example 55
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'PlagiarismRun', sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('log', sa.Unicode(), nullable=True),
        sa.Column('json_config', sa.Unicode(), nullable=True),
        sa.Column('assignment_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(
            ['assignment_id'],
            ['Assignment.id'],
        ), sa.PrimaryKeyConstraint('id'))
    enum = sa.Enum('running', 'done', 'crashed', name='plagiarismstate')
    enum.create(op.get_bind(), checkfirst=True)
    op.add_column(
        'PlagiarismRun',
        sa.Column('state', enum, nullable=False),
    )

    op.create_table(
        'PlagiarismCase', sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('work1_id', sa.Integer(), nullable=True),
        sa.Column('work2_id', sa.Integer(), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('plagiarism_run_id', sa.Integer(), nullable=True),
        sa.Column('match_avg', sa.Float(), nullable=False),
        sa.Column('match_max', sa.Float(), nullable=False),
        sa.ForeignKeyConstraint(['plagiarism_run_id'], ['PlagiarismRun.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['work1_id'], ['Work.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['work2_id'], ['Work.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'))
    op.create_table(
        'PlagiarismMatch', sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('file1_id', sa.Integer(), nullable=True),
        sa.Column('file2_id', sa.Integer(), nullable=True),
        sa.Column('file1_start', sa.Integer(), nullable=False),
        sa.Column('file1_end', sa.Integer(), nullable=False),
        sa.Column('file2_start', sa.Integer(), nullable=False),
        sa.Column('file2_end', sa.Integer(), nullable=False),
        sa.Column('plagiarism_case_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(
            ['file1_id'],
            ['File.id'],
        ), sa.ForeignKeyConstraint(
            ['file2_id'],
            ['File.id'],
        ),
        sa.ForeignKeyConstraint(['plagiarism_case_id'], ['PlagiarismCase.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'))

    conn = op.get_bind()
    conn.execute(
        text("""
    INSERT INTO "Permission" (name, default_value, course_permission)
    SELECT 'can_view_plagiarism', false, true WHERE NOT EXISTS
        (SELECT 1 FROM "Permission" WHERE name = 'can_view_plagiarism')
    """))
    conn.execute(
        text("""
    INSERT INTO "Permission" (name, default_value, course_permission)
    SELECT 'can_manage_plagiarism', false, true WHERE NOT EXISTS
        (SELECT 1 FROM "Permission" WHERE name = 'can_manage_plagiarism')
    """))
Example 56
def upgrade_data():
    connection = op.get_bind()

    # upgrade release data from 5.0 to 5.1
    upgrade_releases(connection)
    upgrade_clusters_replaced_info(connection)
Example 57
def upgrade():
    # create new type
    user_status_enum.create(op.get_bind())

    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user', sa.Column('status', user_status_enum, nullable=False))
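
# A hedged mirror of the upgrade above (an assumption; the original
# downgrade is not shown): drop the column first, then the unused type.
def downgrade():
    op.drop_column('user', 'status')
    user_status_enum.drop(op.get_bind(), checkfirst=False)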
def upgrade():
    conn = op.get_bind()
    conn.execute(
        sa.text("""create or replace function professor_user_link_check()
  returns trigger as $trig$
begin
  if tg_op = 'DELETE' and old.user_id is not null then
    if (select "type" from users where id = old.user_id) = 'p' then
      raise exception 'user type is "p"';
    end if;
  end if;

  if (tg_op != 'DELETE' and new.user_id is not null and old.user_id is null) then
    -- linking
    if (select "type" from users where id = new.user_id) != 'p' then
      raise exception 'user type is not "p"';
    end if;
  end if;

  if (tg_op != 'DELETE' and new.user_id is null and old.user_id is not null) then
    -- unlinking
    if (select "type" from users where id = old.user_id) = 'p' then
      raise exception 'user type is "p"';
    end if;
  end if;

  return new;
end;
$trig$ language plpgsql;

create constraint trigger professor_user_link_trig after insert or update of user_id or delete
  on professors
  deferrable initially deferred
for each row
execute procedure professor_user_link_check();"""))
    conn.execute(
        sa.text("""create or replace function user_professor_link_check()
  returns trigger as $trig$
begin
  if (tg_op = 'INSERT' and new.type != 'p') or (tg_op = 'UPDATE' and old.type != 'p' and new.type != 'p') then
    return new;
  end if;

  if (new.type = 'p') then
    -- linking
    if not (select exists(select 1 from professors where user_id = new.id)) then
      raise exception 'no professor is linked to user with id "%"', new.id;
    end if;
  else
    -- unlinking
    if (select exists(select 1 from professors where user_id = new.id)) then
      raise exception 'a professor is linked to user with id "%"', new.id;
    end if;
  end if;

  return new;
end;
$trig$ language plpgsql;

create constraint trigger user_professor_link_trig after insert or update of "type"
  on users
  deferrable initially deferred
for each row
execute procedure user_professor_link_check();"""))
def delete_func_key(func_key_id):
    # NOTE: func_key_id is not used here; the statement deletes every
    # func key whose destination type is "parking".
    query = (func_key_table
             .delete()
             .where(func_key_table.c.destination_type_id == DESTINATION_PARKING_ID))

    op.get_bind().execute(query)
def upgrade():
    conn = op.get_bind()
    conn.execute('drop function if exists update_courses(numeric, jsonb)')

    conn.execute(
        sa.text(
            """create or replace function update_departments(_university_id numeric, _json jsonb)
  returns numeric as $func$
declare
  _s_id   numeric;
  _d_id   numeric;
  _count  numeric := 0;
  _abbr   varchar;
  _name   varchar;
  _school varchar;
begin
  for _abbr, _name, _school in
  select
    department ->> 'value' as _abbr,
    (regexp_matches(department ->> 'label', '.+(?=\()')) [1] as _name,
    department ->> 'school' as _school
  from jsonb_array_elements(_json -> 'departments') department
  loop
    -- get the school id
    select id
    into _s_id
    from schools
    where abbreviation = _school and university_id = _university_id;

    -- get the department id if it exists
    select id
    into _d_id
    from departments
    where school_id = _s_id and abbreviation = _abbr;

    -- if department does not exist, create it
    if _d_id is null
    then
      insert into departments (abbreviation, name, school_id) values (_abbr, _name, _s_id);
    end if;

    _count = _count + 1;

  end loop;

  return _count;
end;
$func$ language plpgsql;"""))
    conn.execute(
        sa.text(
            """create or replace function update_courses(_university_id numeric, _json jsonb)
  returns numeric as $func$
declare
  _d_id           numeric;
  _c_id           numeric;
  _p_id           numeric;
  _quarter        numeric;
  _latest_quarter numeric;
  _s_id           numeric;

  _department     varchar;
  _number         varchar;
  _title          varchar;

  _professor1     varchar[];
  _professor2     varchar[];
  _professor3     varchar[];

  _professors     varchar[][];
  _professor      varchar[];

  _count          numeric := 0;
  _new_course     boolean := false;

begin
  for
    _quarter,
    _department,
    _number,
    _title,
    _professor1,
    _professor2,
    _professor3
  in
  select
    (course ->> 'term')::int as _quarter,
    course ->> 'subject' as _department,
    course ->> 'catalog_nbr' as _number,
    course ->> 'class_descr' as _title,

    -- prof #1
    case
    when (course ->> 'instr_1') like '%, %' then
        array[
          split_part(course ->> 'instr_1', ', ', 1),
          split_part(course ->> 'instr_1', ', ', 2)
        ]
    when (course ->> 'instr_1') = '' then
        null
    end as _professor1,

    -- prof #2
    case
    when (course ->> 'instr_2') like '%, %' then
      array[
      split_part(course ->> 'instr_2', ', ', 1),
      split_part(course ->> 'instr_2', ', ', 2)
      ]
    when (course ->> 'instr_2') = '' then
      null
    end as _professor2,

    -- prof #3
    case
    when (course ->> 'instr_3') like '%, %' then
      array[
      split_part(course ->> 'instr_3', ', ', 1),
      split_part(course ->> 'instr_3', ', ', 2)
      ]
    when (course ->> 'instr_3') = '' then
      null
    end as _professor3

  from jsonb_array_elements(_json -> 'courses') course
  loop

    if _professor1 is null then continue; end if;

    -- get the department id (assume it exists)
    select departments.id into _d_id
    from departments
    where abbreviation = _department
    order by school_id limit 1;

    -- get the course id if it exists
    select id into _c_id
    from courses
    where department_id = _d_id and number = _number;

    -- if the course does not exist, create it
    if _c_id is null then
      insert into courses (department_id, number, title) values (_d_id, _number, _title)
      returning id into _c_id;

      _new_course = true;
    end if;

    -- get the section id if it exists
    select id into _s_id
    from sections
    where quarter_id = _quarter and course_id = _c_id;

    -- if the section does not exist, create it
    if _s_id is null then
      insert into sections (quarter_id, course_id) values (_quarter, _c_id)
      returning id into _s_id;
    end if;

    _professors = array[_professor1];
    if _professor2 is not null then _professors = array_cat(_professors, _professor2); end if;
    if _professor3 is not null then _professors = array_cat(_professors, _professor3); end if;

    foreach _professor slice 1 in array _professors
    loop

      if _professor[1] is null then continue; end if;

      -- get the professor id if it exists
      select id into _p_id
      from professors
      where last_name = _professor[2] and first_name = _professor[1];

      -- if the professor does not exist, create it
      if  _p_id is null then
        insert into professors (first_name, last_name, university_id)
        values (_professor[1], _professor[2], _university_id)
        returning id into _p_id;
      end if;

      -- check if the professor is listed under this section
      if not exists(select 1
                    from section_professor sp
                    where sp.section_id = _s_id and sp.professor_id = _p_id)
      then
        insert into section_professor (section_id, professor_id) values (_s_id, _p_id);
      end if;
    end loop;

    -- if the course existed, make sure the title is up to date
    if not _new_course then
      -- get the latest quarter which the course was offered in
      select q.id into _latest_quarter
      from quarters q
        join sections s on q.id = s.quarter_id
        join courses c on s.course_id = c.id
      where c.id = _c_id and q.university_id = _university_id
      order by lower(period) desc
      limit 1;

      -- if this course info is for the latest quarter, update the title
      if _quarter = _latest_quarter then
        update courses
        set title = _title
        where id = _c_id;
      end if;

    end if;

    _count = _count + 1;
  end loop;

  return _count;
end;
$func$ language plpgsql;"""))