def upgrade():
    op.execute(Category.__table__.update().
               values(name='Reversing').
               where(Category.name == 'Binary'))
    op.execute(Category.__table__.insert().
               values([{'name': 'Internals'},
                       {'name': 'Exploiting'}]))
def downgrade():
    from lite_mms.constants import groups
    group_table = sa.sql.table(
        "TB_GROUP",
        sa.Column("id", sa.Integer, primary_key=True),
        sa.Column("name", sa.String(32), nullable=False, unique=True),
        sa.Column("default_url", sa.String(256)))
    op.execute(group_table.update().where(group_table.c.id==groups.ADMINISTRATOR).values(default_url="/admin/"))
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('roles_authorities',
    sa.Column('authority_id', sa.Integer(), nullable=True),
    sa.Column('role_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['authority_id'], ['authorities.id'], ),
    sa.ForeignKeyConstraint(['role_id'], ['roles.id'], )
    )
    op.create_index('roles_authorities_ix', 'roles_authorities', ['authority_id', 'role_id'], unique=True)
    op.create_table('roles_certificates',
    sa.Column('certificate_id', sa.Integer(), nullable=True),
    sa.Column('role_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['certificate_id'], ['certificates.id'], ),
    sa.ForeignKeyConstraint(['role_id'], ['roles.id'], )
    )
    op.create_index('roles_certificates_ix', 'roles_certificates', ['certificate_id', 'role_id'], unique=True)
    op.create_index('certificate_associations_ix', 'certificate_associations', ['domain_id', 'certificate_id'], unique=True)
    op.create_index('certificate_destination_associations_ix', 'certificate_destination_associations', ['destination_id', 'certificate_id'], unique=True)
    op.create_index('certificate_notification_associations_ix', 'certificate_notification_associations', ['notification_id', 'certificate_id'], unique=True)
    op.create_index('certificate_replacement_associations_ix', 'certificate_replacement_associations', ['replaced_certificate_id', 'certificate_id'], unique=True)
    op.create_index('certificate_source_associations_ix', 'certificate_source_associations', ['source_id', 'certificate_id'], unique=True)
    op.create_index('roles_users_ix', 'roles_users', ['user_id', 'role_id'], unique=True)

    ### end Alembic commands ###

    # migrate existing authority_id relationship to many_to_many
    conn = op.get_bind()
    for id, authority_id in conn.execute(text('select id, authority_id from roles where authority_id is not null')):
        stmt = text('insert into roles_authorities (role_id, authority_id) values (:role_id, :authority_id)')
        stmt = stmt.bindparams(role_id=id, authority_id=authority_id)
        op.execute(stmt)
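
# A set-based alternative to the row-by-row backfill above -- a sketch with
# the same intended semantics, not part of the original migration:
def _backfill_roles_authorities_set_based():
    op.execute(text(
        'insert into roles_authorities (role_id, authority_id) '
        'select id, authority_id from roles where authority_id is not null'))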
def _add_parent_id_nn_column():
  """Add parent_id_nn column and make it non nullable.

  The creation of the column and making it non nullable are done in separate
  steps to avoid mysql warnings for missing values.
  """
  op.add_column(
      'access_control_list',
      sa.Column(
          'parent_id_nn',
          sa.Integer(),
          server_default="0",
          nullable=True,
      ),
  )

  op.execute("update access_control_list set parent_id_nn = parent_id")
  op.execute("""
      update access_control_list
      set parent_id_nn = 0
      where parent_id_nn is null
  """)

  op.alter_column(
      'access_control_list',
      'parent_id_nn',
      existing_type=sa.Integer(),
      server_default="0",
      nullable=False,
  )
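
# A sketch of the symmetric helper a downgrade path would use, assuming
# nothing else references parent_id_nn:
def _remove_parent_id_nn_column():
  op.drop_column('access_control_list', 'parent_id_nn')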
def upgrade():
    ''' Add the artifact field to the Package table, fill it and adjust
    the unique key constraints.
    '''

    op.add_column(
        'Package',
        sa.Column(
            'namespace',
            sa.String(50),
            sa.ForeignKey(
                'namespaces.namespace',
                onupdate='CASCADE',
                ondelete='CASCADE',
            ),
            default='rpms',
        )
    )

    op.execute('''UPDATE "Package" SET namespace='rpms';''')

    op.alter_column(
        'Package',
        column_name='namespace',
        nullable=False,
        existing_nullable=True)

    op.execute("""
DROP INDEX IF EXISTS "ix_Package_name";
ALTER TABLE "Package"
  ADD CONSTRAINT "ix_package_name_namespace" UNIQUE (name, namespace);
""")
def upgradeTransferTypes():
    update_sql = ("update sample_transfer_type"
                  " set name = :name,"
                  " transfer_template_id = :stti,"
                  " source_plate_count = :spc,"
                  " destination_plate_count = :dpc"
                  " where id = :id")

    insert_sql = ("insert into sample_transfer_type"
                  " (id, name, transfer_template_id,"
                  " source_plate_count, destination_plate_count)"
                  " select :id, :name, :stti, :spc, :dpc"
                  " where not exists"
                  " (select * from sample_transfer_type where id = :id)")

    sql_param_names = ("id", "name", "stti", "spc", "dpc")

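    # Each row below carries a trailing sixth element (None); zip() with the
    # five param names above silently drops it.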
    desired_values = [
        [53, "PCA Pre-Planning", 35, 1, 4, None]
        ,[54, "Primer Hitpicking - Create Source", 2, 1, 0, None]
        ,[55, "PCA master mix addition", 2, 1, 0, None]
        ,[56, "PCA Thermocycling", 2, 1, 0, None]
        ,[57, "PCR Primer Hitpicking", 36, 1, 4, None]
        ,[58, "PCA/PCR Master Mix Addition", 37, 1, 4, None]
        ,[59, "PCA/PCR Thermocycling", 2, 1, 0, None]
    ]

    for row_values in desired_values:
        args = dict(zip(sql_param_names, row_values))

        # Try update first, then insert. The WHERE clauses in the SQL for
        # insert and update should allow this without causing failures.
        op.execute(sa.text(update_sql).bindparams(**args))
        op.execute(sa.text(insert_sql).bindparams(**args))
def downgrade():
    connection = op.get_bind()

    # Create old AZ fields
    op.add_column('services', Column('availability_zone', String(length=255)))
    op.add_column('share_instances',
                  Column('availability_zone', String(length=255)))

    # Migrate data
    az_table = utils.load_table('availability_zones', connection)
    share_instances_table = utils.load_table('share_instances', connection)
    services_table = utils.load_table('services', connection)

    for az in connection.execute(az_table.select()):
        op.execute(
            share_instances_table.update().where(
                share_instances_table.c.availability_zone_id == az.id
            ).values({'availability_zone': az.name})
        )
        op.execute(
            services_table.update().where(
                services_table.c.availability_zone_id == az.id
            ).values({'availability_zone': az.name})
        )

    # Remove AZ_id columns and AZ table
    op.drop_constraint('service_az_id_fk', 'services', type_='foreignkey')
    op.drop_column('services', 'availability_zone_id')
    op.drop_constraint('si_az_id_fk', 'share_instances', type_='foreignkey')
    op.drop_column('share_instances', 'availability_zone_id')
    op.drop_table('availability_zones')
def upgrade():
    op.execute("""
        INSERT INTO release_repositories
        VALUES
        ('mozilla-central-android-api-11'),
        ('mozilla-aurora-android-api-11')
    """)
def upgradeTransferTemplates():
    update_sql = ("update sample_transfer_template"
                  " set name = :name,"
                  " is_one_to_one_transfer = :ioto,"
                  " source_plate_well_count = :spwc,"
                  " destination_plate_well_count = :dpwc"
                  " where id = :id")

    insert_sql = ("insert into sample_transfer_template"
                  " (id, name, is_one_to_one_transfer, source_plate_well_count,"
                  " destination_plate_well_count)"
                  " select :id, :name, :ioto, :spwc, :dpwc"
                  " where not exists"
                  " (select * from sample_transfer_template where id = :id)")

    sql_param_names = ("id", "name", "ioto", "spwc", "dpwc")

    desired_values = [
        [35, "PCA Pre-Planning", "F", None, None]
        ,[36, "PCR Primer Hitpicking", "F", None, None]
        ,[37, "PCA/PCR Master Mix Addition", "F", None, None]

    ]

    for row_values in desired_values:
        args = dict(zip(sql_param_names, row_values))

        # Try update first, then insert. The WHERE clauses in the SQL for
        # insert and update should allow this without causing failures.
        op.execute(sa.text(update_sql).bindparams(**args))
        op.execute(sa.text(insert_sql).bindparams(**args))
def downgrade():
    for query in ("alter table company DROP FOREIGN KEY company_ibfk_1",
                  "alter table company DROP FOREIGN KEY company_ibfk_2",):
        op.execute(query)
    op.drop_column('company', 'logo_id')
    op.drop_column('company', 'header_id')
    op.drop_table('config_files')
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('associate_disruption_pt_object',
    sa.Column('disruption_id', postgresql.UUID(), nullable=False),
    sa.Column('pt_object_id', postgresql.UUID(), nullable=False),
    sa.ForeignKeyConstraint(['disruption_id'], ['disruption.id'], ),
    sa.ForeignKeyConstraint(['pt_object_id'], ['pt_object.id'], ),
    sa.PrimaryKeyConstraint('disruption_id', 'pt_object_id', name='disruption_pt_object_pk')
    )
    connection = op.get_bind()
    result = connection.execute('select pt.id as pt_id, dd.id as dis_id, dd.created_at as created_at,'
                                ' dd.localization_id as loc_id '
                                'from disruption as dd left OUTER join pt_object as pt '
                                'on pt.uri = dd.localization_id')
    for row in result:
        # Pt_object exists in the database
        if row['pt_id']:
            pt_object_id = row['pt_id']
        else:
            # Pt_object does not exist in the database
            op.execute("INSERT INTO pt_object (created_at, id, type, uri) VALUES ('{}', '{}', '{}', '{}')".
                       format(row['created_at'], row['dis_id'], 'stop_area', row['loc_id']))
            pt_object_id = row['dis_id']

        op.execute("INSERT INTO associate_disruption_pt_object (disruption_id, pt_object_id) VALUES ('{}', '{}')".
                   format(row['dis_id'], pt_object_id))

    op.drop_column(u'disruption', 'localization_id')
def upgrade():
    op.create_table(u'exploitability_reports',
    sa.Column(u'signature_id', sa.INTEGER(), nullable=False),
    sa.Column(u'report_date', sa.DATE(), nullable=False),
    sa.Column(u'null_count', sa.INTEGER(), server_default='0', nullable=False),
    sa.Column(u'none_count', sa.INTEGER(), server_default='0', nullable=False),
    sa.Column(u'low_count', sa.INTEGER(), server_default='0', nullable=False),
    sa.Column(u'medium_count', sa.INTEGER(), server_default='0', nullable=False),
    sa.Column(u'high_count', sa.INTEGER(), server_default='0', nullable=False),
    sa.ForeignKeyConstraint(['signature_id'], [u'signatures.signature_id'], ),
    sa.PrimaryKeyConstraint()
    )
    # We can probably get away with just applying this to the parent table
    # If there are performance problems on stage, break this out and apply to all
    # child partitions first, then reports_clean last.
    op.add_column(u'reports_clean', sa.Column(u'exploitability', sa.TEXT(), nullable=True))
    app_path = os.getcwd()
    procs = [
        '001_update_reports_clean.sql'
        , 'update_exploitability.sql'
        , 'backfill_exploitability.sql'
     ]
    for myfile in [app_path + '/socorro/external/postgresql/raw_sql/procs/' + line for line in procs]:
        with open(myfile, 'r') as f:
            proc = f.read()
        op.execute(proc)
def downgrade_node_error_type():
    enum_type = sa.Enum(*node_error_types_old, name='node_error_type')
    enum_type.create(op.get_bind(), checkfirst=False)
    op.execute(
        u'ALTER TABLE nodes ALTER COLUMN error_type TYPE node_error_type'
        u' USING error_type::text::node_error_type'
    )
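
# The matching upgrade would mirror the downgrade above with the new value
# list -- a sketch, assuming a node_error_types_new tuple is defined and the
# usual PostgreSQL rename-create-cast-drop enum dance applies:
def upgrade_node_error_type():
    op.execute('ALTER TYPE node_error_type RENAME TO node_error_type_old')
    enum_type = sa.Enum(*node_error_types_new, name='node_error_type')
    enum_type.create(op.get_bind(), checkfirst=False)
    op.execute(
        u'ALTER TABLE nodes ALTER COLUMN error_type TYPE node_error_type'
        u' USING error_type::text::node_error_type'
    )
    op.execute('DROP TYPE node_error_type_old')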
def upgrade_table(table_name):
  """Add audit foreign key to a table."""

  op.add_column(
      table_name,
      sa.Column("audit_id", sa.Integer(), nullable=True)
  )
  op.execute("""
      UPDATE {table_name} AS t
      JOIN contexts AS c ON
          c.id = t.context_id AND
          c.related_object_type = "Audit"
      JOIN audits AS au ON
          c.related_object_id = au.id
      SET
        t.audit_id = au.id
  """.format(
      table_name=table_name,
  ))

  op.alter_column(
      table_name,
      "audit_id",
      existing_type=sa.Integer(),
      nullable=False
  )

  op.create_foreign_key(
      "fk_{}_audits".format(table_name),
      table_name,
      "audits",
      ["audit_id"],
      ["id"],
      ondelete="RESTRICT"
  )
def upgrade():
    # constraint name is autogenerated differently between pg versions
    try:
        op.drop_constraint("reportunknownpackages_report_id_key", t)
    except Exception:
        op.execute('ROLLBACK')
        op.drop_constraint(
            "reportunknownpackages_report_id_type_name_installed_epoch_i_key",
            t)

    op.drop_constraint("reportunknownpackages_installed_arch_id_fkey", t)

    op.drop_column(t, "running_epoch")
    op.drop_column(t, "running_version")
    op.drop_column(t, "running_release")
    op.drop_column(t, "running_arch_id")

    op.alter_column(t, "installed_epoch", new_column_name="epoch")
    op.alter_column(t, "installed_version", new_column_name="version")
    op.alter_column(t, "installed_release", new_column_name="release")
    op.alter_column(t, "installed_arch_id", new_column_name="arch_id")

    op.create_foreign_key("reportunknownpackages_arch_id_fkey", t,
                          "archs", ["arch_id"], ["id"])

    fields = ["report_id", "type", "name", "epoch",
              "version", "release", "arch_id"]

    op.create_unique_constraint("reportunknownpackages_report_id_key", t,
                                fields)
def upgrade():

  op.alter_column(
      'cycle_task_group_object_tasks',
      'cycle_task_group_object_id',
      existing_type=sa.Integer(),
      nullable=True
  )

  op.add_column(
      'cycle_task_group_object_tasks',
      sa.Column('cycle_task_group_id', sa.Integer(), nullable=False)
  )

  op.execute("""
    UPDATE cycle_task_group_object_tasks
    SET cycle_task_group_id=(
      SELECT cycle_task_group_id
      FROM cycle_task_group_objects
      WHERE id=cycle_task_group_object_tasks.cycle_task_group_object_id
    )
  """)

  op.create_foreign_key(
      "cycle_task_group_id", "cycle_task_group_object_tasks",
      "cycle_task_groups", ["cycle_task_group_id"], ["id"]
  )
def upgrade():
    connection = op.get_bind()
    op.add_column('info_touristique', GeometryExtensionColumn('infotour_location',
                                                              Geometry(dimension=2,
                                                                       srid=3447)))
    infotouristique = InfoTouristique.__table__
    query = sa.select([infotouristique.c.infotour_pk,
                       infotouristique.c.infotour_gps_lat,
                       infotouristique.c.infotour_gps_long])
    query.append_whereclause(sa.and_(infotouristique.c.infotour_gps_lat != None,
                                     infotouristique.c.infotour_gps_long != None))
    infos = connection.execute(query).fetchall()
    i = 0
    pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(),
                                            progressbar.Bar()],
                                   maxval=len(infos)).start()
    for info in infos:
        point = 'POINT(%s %s)' % (info.infotour_gps_long, info.infotour_gps_lat)
        point = geoalchemy.base.WKTSpatialElement(point, srid=3447)
        op.execute(
            infotouristique.update().
            where(infotouristique.c.infotour_pk == info.infotour_pk).
            values({'infotour_location': point}))
        pbar.update(i)
        i += 1
    pbar.finish()
def downgrade():
    op.execute("""
    DROP INDEX ix_applied_change_dep_name;

    DROP FUNCTION rpmvercmp(varchar, varchar);
    DROP FUNCTION rpmvercmp_evr(integer, varchar, varchar, integer, varchar, varchar);
    """)
def upgrade():
    resource_type = op.create_table(
        'resource_type',
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint('name'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )

    resource = sa.Table('resource', sa.MetaData(),
                        type_string_col("type", "resource"))
    op.execute(resource_type.insert().from_select(
        ['name'], sa.select([resource.c.type]).distinct()))

    for table in ["resource", "resource_history"]:
        op.alter_column(table, "type", new_column_name="old_type",
                        existing_type=type_enum)
        op.add_column(table, type_string_col("type", table))
        sa_table = sa.Table(table, sa.MetaData(),
                            type_string_col("type", table),
                            type_enum_col('old_type'))
        op.execute(sa_table.update().values(
            {sa_table.c.type: sa_table.c.old_type}))
        op.drop_column(table, "old_type")
        op.alter_column(table, "type", nullable=False,
                        existing_type=type_string)
def upgrade():
    # Delete the stored procedures
    op.execute('DROP FUNCTION IF EXISTS update_signatures(date, boolean)')
    op.execute('DROP FUNCTION IF EXISTS update_signatures_hourly(timestamp, interval, boolean)')

    # Load the new version of backfill_matviews
    load_stored_proc(op, ['backfill_matviews.sql'])
def upgrade():
    op.execute('update notifications set notification_type = (select cast(cast(template_type as text) as notification_type) from templates where templates.id= notifications.template_id)')
    conn = op.get_bind()
    reset_counts = "update notification_statistics set emails_requested = 0, emails_delivered = 0, emails_failed=0," \
                   "sms_requested = 0, sms_delivered = 0, sms_failed=0 where day > '2016-06-30'"
    op.execute(reset_counts)
    all_notifications = "select * from notifications where date(created_at) > '2016-06-30' order by created_at;"

    results = conn.execute(all_notifications)
    res = results.fetchall()

    for x in res:
        print(' in loop {} {}'.format(x.notification_type, x.created_at))
        created = x.created_at.strftime("%Y-%m-%d")
        sql = None
        if x.notification_type == 'email' and x.status == 'delivered':
            sql = "update notification_statistics set emails_requested = emails_requested + 1, " \
                  "emails_delivered = emails_delivered + 1 where day = date('{}') and service_id = '{}'".format(created, x.service_id)
        elif x.notification_type == 'sms' and x.status == 'delivered':
            sql = "update notification_statistics set sms_requested = sms_requested + 1, " \
                  "sms_delivered = sms_delivered + 1 where day = date('{}') and service_id = '{}'".format(created, x.service_id)
        elif x.notification_type == 'email' and x.status in ['technical-failure', 'temporary-failure', 'permanent-failure']:
            sql = "update notification_statistics set emails_requested = emails_requested + 1, " \
                  "emails_failed = emails_failed + 1 where day = date('{}') and service_id = '{}'".format(created, x.service_id)
        elif x.notification_type == 'sms' and x.status in ['technical-failure', 'temporary-failure', 'permanent-failure']:
            sql = "update notification_statistics set sms_requested = sms_requested + 1, " \
                  "sms_failed = sms_failed + 1 where day = date('{}') and service_id = '{}'".format(created, x.service_id)
        elif x.notification_type == 'email' and x.status in ['created', 'sending', 'pending']:
            sql = "update notification_statistics set emails_requested = emails_requested + 1 " \
                  " where day = date('{}') and service_id = '{}'".format(created, x.service_id)
        elif x.notification_type == 'sms' and x.status in ['created', 'sending', 'pending']:
            sql = "update notification_statistics set sms_requested = sms_requested + 1 " \
                  " where day = date('{}') and service_id = '{}'".format(created, x.service_id)
        # Guard: the original code fell through with the previous iteration's
        # statement when no branch matched.
        if sql:
            print(sql)
            conn.execute(sql)
def upgrade():
    conn = op.get_bind()
    # Minimal examples of the tables we need to manipulate
    listener = sa.sql.table(
        'listener',
        sa.sql.column('load_balancer_id', sa.String),
        sa.sql.column('default_pool_id', sa.String))
    pool = sa.sql.table(
        'pool',
        sa.sql.column('load_balancer_id', sa.String),
        sa.sql.column('id', sa.String))

    # This foreign key does not need to be unique anymore. To remove the
    # uniqueness but keep the foreign key we have to do some juggling.
    op.drop_constraint('fk_listener_pool_id', 'listener',
                       type_='foreignkey')
    op.drop_constraint('uq_listener_default_pool_id', 'listener',
                       type_='unique')
    op.create_foreign_key('fk_listener_pool_id', 'listener',
                          'pool', ['default_pool_id'], ['id'])

    op.add_column(u'pool',
                  sa.Column('load_balancer_id', sa.String(36),
                            sa.ForeignKey('load_balancer.id'), nullable=True))

    # Populate this new column appropriately
    select_obj = sa.select([listener.c.load_balancer_id,
                           listener.c.default_pool_id]).where(
                               listener.c.default_pool_id.isnot(None))
    result = conn.execute(select_obj)
    for row in result:
        stmt = pool.update().values(load_balancer_id=row[0]).where(
            pool.c.id == row[1])
        op.execute(stmt)
def upgrade():
    status_to_order = """
CREATE OR REPLACE FUNCTION status_to_order (x integer)
RETURNS integer AS $$ BEGIN
        RETURN CASE WHEN x = 0 THEN 0
                    WHEN x = 3 THEN 1
                    WHEN x = 6 THEN 2
                    WHEN x = 7 THEN 3
                    WHEN x = 4 THEN 4
                    WHEN x = 1 THEN 5
                    WHEN x = 5 THEN 6
               ELSE 1000
        END; END;
    $$ LANGUAGE plpgsql;
"""

    order_to_status = """
CREATE OR REPLACE FUNCTION order_to_status (x integer)
RETURNS integer AS $$ BEGIN
        RETURN CASE WHEN x = 0 THEN 0
                    WHEN x = 1 THEN 3
                    WHEN x = 2 THEN 6
                    WHEN x = 3 THEN 7
                    WHEN x = 4 THEN 4
                    WHEN x = 5 THEN 1
                    WHEN x = 6 THEN 5
               ELSE 1000
        END; END;
    $$ LANGUAGE plpgsql;
"""

    if op.get_bind().dialect.name == "postgresql":
        op.execute(status_to_order)
        op.execute(order_to_status)
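
# A corresponding downgrade would drop the helpers -- a sketch (PostgreSQL
# needs the argument signature in DROP FUNCTION):
def downgrade():
    if op.get_bind().dialect.name == "postgresql":
        op.execute("DROP FUNCTION IF EXISTS status_to_order(integer)")
        op.execute("DROP FUNCTION IF EXISTS order_to_status(integer)")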
def upgrade():
    op.execute('ALTER TABLE roombooking.rooms ALTER COLUMN owner_id TYPE int USING owner_id::int')
    op.create_foreign_key(None,
                          'rooms', 'users',
                          ['owner_id'], ['id'],
                          source_schema='roombooking', referent_schema='users')
    op.create_index(None, 'rooms', ['owner_id'], unique=False, schema='roombooking')
def downgrade():
    stmt = '''\
DROP TABLE cell_{id}
'''
    shard_ids = ('gsm_ocid', 'wcdma_ocid', 'lte_ocid')
    for shard_id in shard_ids:
        op.execute(sa.text(stmt.format(id=shard_id)))
def upgrade():
    op.add_column(
        'environments',
        sa.Column(
            'zone_id',
            INTEGER(),
            info={'after': 'prefix'}
        )
    )
    op.execute(
        'update environments join zones on environments.domain = '
        'zones.zoneName set environments.zone_id = zones.ZoneID'
    )
    op.alter_column(
        'environments',
        'zone_id',
        nullable=False,
        existing_type=INTEGER()
    )
    op.create_foreign_key(
        'fk_environments_zone_id_zones',
        'environments',
        'zones',
        ['zone_id'],
        ['zoneID']
    )
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('series', 'analytic_context_key')
    op.drop_index('ix_homepath_census_block', table_name='homepath_listing')
    op.drop_index(op.f('ix_craigslist_listing_subdomain'), table_name='craigslist_listing')
    op.create_table('census_block_series',
    sa.Column('state_fp', sa.VARCHAR(length=2), autoincrement=False, nullable=False),
    sa.Column('county_fp', sa.VARCHAR(length=3), autoincrement=False, nullable=False),
    sa.Column('tract_ce', sa.VARCHAR(length=6), autoincrement=False, nullable=False),
    sa.Column('block_ce', sa.VARCHAR(length=1), autoincrement=False, nullable=False),
    sa.Column('series_key', sa.VARCHAR(length=40), autoincrement=False, nullable=False),
    sa.ForeignKeyConstraint(['series_key'], [u'series.key'], name=u'fk_census_block_series_series_key_series'),
    sa.ForeignKeyConstraint(['state_fp', 'county_fp', 'tract_ce', 'block_ce'], [u'census_block.state_fp', u'census_block.county_fp', u'census_block.tract_ce', u'census_block.block_ce'], name=u'fk_census_block_series'),
    sa.PrimaryKeyConstraint('state_fp', 'county_fp', 'tract_ce', 'block_ce', 'series_key', name=u'pk_census_block_series')
    )
    op.create_table('series_segment',
    sa.Column('series_key', sa.VARCHAR(length=40), autoincrement=False, nullable=False),
    sa.Column('dimension_id', sa.VARCHAR(length=20), autoincrement=False, nullable=False),
    sa.Column('segment_id', sa.VARCHAR(length=20), autoincrement=False, nullable=False),
    sa.ForeignKeyConstraint(['dimension_id', 'segment_id'], [u'segment.dimension_id', u'segment.id'], name=u'fk_series_segment'),
    sa.ForeignKeyConstraint(['dimension_id'], [u'dimension.id'], name=u'fk_series_segment_dimension_id_dimension'),
    sa.ForeignKeyConstraint(['series_key'], [u'series.key'], name=u'fk_series_segment_series_key_series'),
    sa.PrimaryKeyConstraint('series_key', 'dimension_id', 'segment_id', name=u'pk_series_segment')
    )
    op.drop_index('ix_census_block_segment_census_block', table_name='census_block_segment')
    op.drop_table('census_block_segment')
    op.drop_index(op.f('ix_analytic_context_segment_analytic_context_key'), table_name='analytic_context_segment')
    op.drop_index('ix_analytic_context_segment', table_name='analytic_context_segment')
    op.drop_table('analytic_context_segment')
    op.drop_table('analytic_context')

    op.execute(segment.delete(segment.c.dimension_id == 'census_block'))
    op.alter_column('segment', 'sort_value', type_=sa.Unicode(10))
def _update_region_batch(bind, shard_id, geocoder, batch=10000):
    rows = bind.execute(sa.text(stmt_select_region.format(
        id=shard_id, batch=batch))).fetchall()

    areas = set()
    cells = {}
    deleted = 0

    i = 0
    for row in rows:
        code = geocoder.region_for_cell(row.lat, row.lon, row.mcc)
        if code not in cells:
            cells[code] = []
        cells[code].append(row[0])
        if not code:
            # cellid is an 11-byte column whose last 4 bytes are the
            # cid; it is hex encoded, so the first 22 - 8 = 14
            # characters are the area id
            areas.add(row[0][:14])
            deleted += 1
        i += 1

    for code, cellids in cells.items():
        ids = 'UNHEX("' + '"), UNHEX("'.join(cellids) + '")'
        if not code:
            op.execute(sa.text(stmt_delete_outside.format(
                id=shard_id, ids=ids)))
        else:
            op.execute(sa.text(stmt_update_region.format(
                id=shard_id, code=code, ids=ids)))

    return (i, areas, deleted)
def upgrade():
    op.add_column('jobpost', sa.Column('search_vector', postgresql.TSVECTOR(), nullable=True))
    op.execute(sa.DDL(
        '''
        CREATE FUNCTION jobpost_search_vector_update() RETURNS TRIGGER AS $$
        BEGIN
            IF TG_OP = 'INSERT' THEN
                NEW.search_vector = to_tsvector('english', COALESCE(NEW.company_name, '') || ' ' || COALESCE(NEW.headline, '') || ' ' || COALESCE(NEW.headlineb, '') || ' ' || COALESCE(NEW.description, '') || ' ' || COALESCE(NEW.perks, ''));
            END IF;
            IF TG_OP = 'UPDATE' THEN
                IF NEW.headline <> OLD.headline OR COALESCE(NEW.headlineb, '') <> COALESCE(OLD.headlineb, '') OR NEW.description <> OLD.description OR NEW.perks <> OLD.perks THEN
                    NEW.search_vector = to_tsvector('english', COALESCE(NEW.company_name, '') || ' ' || COALESCE(NEW.headline, '') || ' ' || COALESCE(NEW.headlineb, '') || ' ' || COALESCE(NEW.description, '') || ' ' || COALESCE(NEW.perks, ''));
                END IF;
            END IF;
            RETURN NEW;
        END
        $$ LANGUAGE 'plpgsql';

        CREATE TRIGGER jobpost_search_vector_trigger BEFORE INSERT OR UPDATE ON jobpost
        FOR EACH ROW EXECUTE PROCEDURE jobpost_search_vector_update();

        CREATE INDEX ix_jobpost_search_vector ON jobpost USING gin(search_vector);

        UPDATE jobpost SET search_vector = to_tsvector('english', COALESCE(company_name, '') || ' ' || COALESCE(headline, '') || ' ' || COALESCE(headlineb, '') || ' ' || COALESCE(description, '') || ' ' || COALESCE(perks, ''));
        '''))
def grant_permission_for_group(permission_name, group_name):
    execute(
        groups_permissions.insert().values(
            group_id=select([groups.c.group_id]).where(groups.c.group_name == group_name),
            permission_id=select([permissions.c.permission_id]).where(permissions.c.permission_name == permission_name)
        )
    )
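
# A hypothetical inverse helper (not in the original listing); the scalar
# subqueries mirror the insert above, and the chained where() calls are ANDed:
def revoke_permission_for_group(permission_name, group_name):
    execute(
        groups_permissions.delete().where(
            groups_permissions.c.group_id == select([groups.c.group_id]).where(groups.c.group_name == group_name)
        ).where(
            groups_permissions.c.permission_id == select([permissions.c.permission_id]).where(permissions.c.permission_name == permission_name)
        )
    )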
def downgrade():
    op.drop_table('event')
    op.execute("DROP TYPE statuses;")
def downgrade():
    query = "REVOKE CONNECT ON DATABASE {0} FROM {1} ".format(
        current_app.config.get("SQL_DATABASE"),
        current_app.config.get("FEEDER_SQL_USERNAME"))
    op.execute(query)
def upgrade():
    query = "GRANT CONNECT ON DATABASE {0} TO {1} ".format(
        current_app.config.get("SQL_DATABASE"),
        current_app.config.get("FEEDER_SQL_USERNAME"))
    op.execute(query)
def downgrade():
    op.get_bind()
    op.execute(
        "update services set name = 'GOV.UK Notify' where id = 'd6aa2c68-a2d9-4437-ab19-3ae8eb202553'"
    )
def upgrade():
    op.get_bind()
    op.execute(
        "update services set name = 'Catalyst Notify' where id = 'd6aa2c68-a2d9-4437-ab19-3ae8eb202553'"
    )
def upgrade() -> None:
    op.execute("update facility_scada set network_id='AEMO_ROOFTOP' where facility_code like 'ROOFTOP_NEM_%';")
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'charakteristika_obci', sa.Column('datum', sa.Date(), nullable=False),
        sa.Column('kraj_kod', sa.Unicode(), nullable=False),
        sa.Column('okres_kod', sa.Unicode(), nullable=False),
        sa.Column('orp_kod', sa.Unicode(), nullable=False),
        sa.Column('nove_pripady', sa.Integer(), nullable=True),
        sa.Column('aktivni_pripady', sa.Integer(), nullable=True),
        sa.Column('nove_pripady_65', sa.Integer(), nullable=True),
        sa.Column('nove_pripady_7_dni', sa.Integer(), nullable=True),
        sa.Column('nove_pripady_14_dni', sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint('datum', 'kraj_kod', 'okres_kod', 'orp_kod'))

    op.create_table('populace_orp',
                    sa.Column('orp_kod', sa.Unicode(), nullable=False),
                    sa.Column('pocet', sa.Integer(), nullable=False),
                    sa.PrimaryKeyConstraint('orp_kod'))

    op.add_column('obce_orp',
                  sa.Column('ruian_kod', sa.Integer(), nullable=True))

    connection = op.get_bind()

    df = pd.read_csv('data/populace_orp.csv', delimiter=';')
    df['orp_kod'] = df['orp_kod'].astype(str)
    df.to_sql(PopulaceOrp.__tablename__,
              connection,
              if_exists='replace',
              index=False)
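    # Note: if_exists='replace' makes pandas drop and recreate the table, so
    # the PrimaryKeyConstraint declared in create_table above does not
    # survive this load.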

    df = pd.read_csv('data/obce_orp.csv', delimiter=';')
    df['kod_obce_orp'] = df['kod_obce_orp'].astype(str)
    df['uzis_orp'] = df['uzis_orp'].astype(str)
    df = df[[
        'kod_obce_orp', 'nazev_obce', 'okres_nuts', 'kraj_nuts', 'aken',
        'uzis_orp', 'ruian_kod'
    ]]
    df.to_sql(ObceORP.__tablename__,
              connection,
              if_exists='replace',
              index=False)

    op.add_column('ockovani_lide',
                  sa.Column('orp_bydl_kod', sa.Unicode(), nullable=True))
    op.execute("UPDATE ockovani_lide SET orp_bydl_kod = '-'")
    op.execute('ALTER TABLE ockovani_lide DROP CONSTRAINT ockovani_lide_pkey')
    op.create_primary_key('ockovani_lide_pkey', 'ockovani_lide', [
        'datum', 'vakcina', 'zarizeni_kod', 'poradi_davky', 'vekova_skupina',
        'kraj_bydl_nuts', 'orp_bydl_kod', 'indikace_zdravotnik',
        'indikace_socialni_sluzby', 'indikace_ostatni', 'indikace_pedagog',
        'indikace_skolstvi_ostatni', 'indikace_bezpecnostni_infrastruktura',
        'indikace_chronicke_onemocneni'
    ])

    op.alter_column('populace',
                    'orp_kod',
                    existing_type=sa.TEXT(),
                    nullable=False)
    op.alter_column('populace',
                    'pocet',
                    existing_type=sa.BIGINT(),
                    nullable=False)
    op.alter_column('populace',
                    'vek',
                    existing_type=sa.BIGINT(),
                    nullable=False)
    op.create_foreign_key(None, 'vakcinacka', 'ockovaci_mista', ['misto_id'],
                          ['id'])
def upgrade():
    feature = FeatureToggle.SYNCHRONIZE_VENUE_PROVIDER_IN_WORKER
    op.execute(f"""INSERT INTO feature (name, description, "isActive")
        VALUES ('{feature.name}', '{feature.value}', False)
        """)
def downgrade():
    for stmt in bulk_rename(mapping, True):
        op.execute(stmt)
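
# bulk_rename is not shown in this listing; a hypothetical sketch of such a
# helper (the (old, new) mapping shape and the reverse flag are assumptions):
def bulk_rename(mapping, reverse=False):
    """Yield ALTER TABLE ... RENAME statements for (old, new) table pairs."""
    for old_name, new_name in mapping:
        if reverse:
            old_name, new_name = new_name, old_name
        yield 'ALTER TABLE {} RENAME TO {}'.format(old_name, new_name)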
def downgrade():
    feature = FeatureToggle.SYNCHRONIZE_VENUE_PROVIDER_IN_WORKER
    op.execute(f"DELETE FROM feature WHERE name = '{feature.name}'")
def upgrade():
    conn = op.get_bind()

    # Work around Alembic limitations: it was impossible to write one code
    # path that works across different DBMS.
    if conn.engine.driver == "psycopg2":
        # Altering the status ENUM.
        # Raw SQL is used here because alembic doesn't deal well with
        # alter_column on ENUM types.
        # Commit because SQLAlchemy doesn't support ALTER TYPE in a transaction.
        op.execute('COMMIT')
        conn.execute("ALTER TYPE status_type ADD VALUE 'failed_reporting'")
    else:
        # Read data.
        tasks_data = []
        old_tasks = conn.execute(
            "select id, target, category, timeout, priority, custom, machine, package, options, platform, memory, enforce_timeout, clock, added_on, started_on, completed_on, status, sample_id from tasks").fetchall()
        for item in old_tasks:
            d = {}
            d["id"] = item[0]
            d["target"] = item[1]
            d["category"] = item[2]
            d["timeout"] = item[3]
            d["priority"] = item[4]
            d["custom"] = item[5]
            d["machine"] = item[6]
            d["package"] = item[7]
            d["options"] = item[8]
            d["platform"] = item[9]
            d["memory"] = item[10]
            d["enforce_timeout"] = item[11]

            # Normalize the four datetime columns; they may come back as
            # datetime objects, strings, or NULL depending on the backend.
            for key, idx in (("clock", 12), ("added_on", 13),
                             ("started_on", 14), ("completed_on", 15)):
                if isinstance(item[idx], datetime):
                    d[key] = item[idx]
                elif item[idx]:
                    d[key] = parse(item[idx])
                else:
                    d[key] = None

            d["status"] = item[16]
            d["sample_id"] = item[17]

            tasks_data.append(d)
        if conn.engine.driver == "mysqldb":
            # Disable foreign key checking to migrate table avoiding checks.
            op.execute('SET foreign_key_checks = 0')

            # Drop old table.
            op.drop_table("tasks")

            # Drop old Enum.
            sa.Enum(name="status_type").drop(op.get_bind(), checkfirst=False)
            # Create table with 1.2 schema.
            op.create_table(
                "tasks",
                sa.Column("id", sa.Integer(), nullable=False),
                sa.Column("target", sa.String(length=255), nullable=False),
                sa.Column("category", sa.String(length=255), nullable=False),
                sa.Column("timeout", sa.Integer(), server_default="0", nullable=False),
                sa.Column("priority", sa.Integer(), server_default="1", nullable=False),
                sa.Column("custom", sa.String(length=255), nullable=True),
                sa.Column("machine", sa.String(length=255), nullable=True),
                sa.Column("package", sa.String(length=255), nullable=True),
                sa.Column("options", sa.String(length=255), nullable=True),
                sa.Column("platform", sa.String(length=255), nullable=True),
                sa.Column("memory", sa.Boolean(), nullable=False, default=False),
                sa.Column("enforce_timeout", sa.Boolean(), nullable=False, default=False),
                sa.Column("clock", sa.DateTime(timezone=False), default=datetime.now, nullable=False),
                sa.Column("added_on", sa.DateTime(timezone=False), nullable=False),
                sa.Column("started_on", sa.DateTime(timezone=False), nullable=True),
                sa.Column("completed_on", sa.DateTime(timezone=False), nullable=True),
                sa.Column("status",
                          sa.Enum("pending", "running", "completed", "reported", "recovered", "failed_analysis",
                                  "failed_processing", "failed_reporting", name="status_type"),
                          server_default="pending", nullable=False),
                sa.Column("sample_id", sa.Integer, sa.ForeignKey("samples.id"), nullable=True),
                sa.PrimaryKeyConstraint("id")
            )
            op.execute('COMMIT')

            # Insert data.
            op.bulk_insert(db.Task.__table__, tasks_data)
            # Enable foreign key.
            op.execute('SET foreign_key_checks = 1')

        else:
            op.drop_table("tasks")

            # Create table with 1.2 schema.
            op.create_table(
                "tasks",
                sa.Column("id", sa.Integer(), nullable=False),
                sa.Column("target", sa.String(length=255), nullable=False),
                sa.Column("category", sa.String(length=255), nullable=False),
                sa.Column("timeout", sa.Integer(), server_default="0", nullable=False),
                sa.Column("priority", sa.Integer(), server_default="1", nullable=False),
                sa.Column("custom", sa.String(length=255), nullable=True),
                sa.Column("machine", sa.String(length=255), nullable=True),
                sa.Column("package", sa.String(length=255), nullable=True),
                sa.Column("options", sa.String(length=255), nullable=True),
                sa.Column("platform", sa.String(length=255), nullable=True),
                sa.Column("memory", sa.Boolean(), nullable=False, default=False),
                sa.Column("enforce_timeout", sa.Boolean(), nullable=False, default=False),
                sa.Column("clock", sa.DateTime(timezone=False), default=datetime.now, nullable=False),
                sa.Column("added_on", sa.DateTime(timezone=False), nullable=False),
                sa.Column("started_on", sa.DateTime(timezone=False), nullable=True),
                sa.Column("completed_on", sa.DateTime(timezone=False), nullable=True),
                sa.Column("status",
                          sa.Enum("pending", "running", "completed", "reported", "recovered", "failed_analysis",
                                  "failed_processing", "failed_reporting", name="status_type"),
                          server_default="pending", nullable=False),
                sa.Column("sample_id", sa.Integer, sa.ForeignKey("samples.id"), nullable=True),
                sa.PrimaryKeyConstraint("id")
            )

            # Insert data.
            op.bulk_insert(db.Task.__table__, tasks_data)
def delete_permission(permission_name):
    execute(
        permissions.delete().\
            where(permissions.c.permission_name==inline_literal(permission_name))
    )
def upgrade():
    """Upgrade database schema and/or data, creating a new revision."""
    op.execute(DELETE_REVISIONS_SQL)
    op.execute(DELETE_RELATIONSHIPS_SQL)
def upgrade():
    for stmt in bulk_rename(mapping):
        op.execute(stmt)
def upgrade():

    op.execute(
        """ 
        -- Table Definition ----------------------------------------------

        CREATE TABLE flow (
            id uuid DEFAULT gen_random_uuid() PRIMARY KEY,
            created timestamp with time zone NOT NULL DEFAULT now(),
            version integer,
            name character varying NOT NULL,
            description character varying,
            environment jsonb,
            parameters jsonb DEFAULT '{}'::jsonb,
            archived boolean NOT NULL DEFAULT false,
            version_group_id character varying,
            storage jsonb,
            core_version character varying,
            updated timestamp with time zone NOT NULL DEFAULT now(),
            settings jsonb NOT NULL DEFAULT '{}'::jsonb,
            serialized_flow jsonb
        );

        -- Indices -------------------------------------------------------

        CREATE INDEX ix_flow_version ON flow(version int4_ops);
        CREATE INDEX ix_flow_name ON flow USING GIN (name gin_trgm_ops);
        CREATE INDEX ix_flow_version_group_id ON flow(version_group_id text_ops);
 
        """
    )

    op.execute(
        """

        -- Table Definition ----------------------------------------------

        CREATE TABLE flow_run (
            id uuid DEFAULT gen_random_uuid() PRIMARY KEY,
            created timestamp with time zone NOT NULL DEFAULT now(),
            flow_id uuid NOT NULL REFERENCES flow(id) ON DELETE CASCADE,
            parameters jsonb DEFAULT '{}'::jsonb,
            scheduled_start_time timestamp with time zone NOT NULL DEFAULT clock_timestamp(),
            auto_scheduled boolean NOT NULL DEFAULT false,
            heartbeat timestamp with time zone,
            start_time timestamp with time zone,
            end_time timestamp with time zone,
            duration interval,
            version integer NOT NULL DEFAULT 0,
            state character varying,
            state_timestamp timestamp with time zone,
            state_message character varying,
            state_result jsonb,
            state_start_time timestamp with time zone,
            serialized_state jsonb,
            name character varying,
            context jsonb,
            times_resurrected integer DEFAULT 0,
            updated timestamp with time zone NOT NULL DEFAULT now()
        );

        -- Indices -------------------------------------------------------

        CREATE INDEX ix_flow_run_flow_id ON flow_run(flow_id uuid_ops);
        CREATE INDEX ix_flow_run_heartbeat ON flow_run(heartbeat timestamptz_ops);
        CREATE INDEX ix_flow_run_state ON flow_run(state text_ops);
        CREATE INDEX ix_flow_run_start_time ON flow_run(start_time timestamptz_ops);
        CREATE INDEX ix_flow_run_name ON flow_run USING GIN (name gin_trgm_ops);
        CREATE INDEX ix_flow_run_flow_id_scheduled_start_time_for_hasura ON flow_run(flow_id uuid_ops,scheduled_start_time timestamptz_ops DESC);

        """
    )

    op.execute(
        """

        -- Table Definition ----------------------------------------------

        CREATE TABLE flow_run_state (
            id uuid DEFAULT gen_random_uuid() PRIMARY KEY,
            flow_run_id uuid NOT NULL REFERENCES flow_run(id) ON DELETE CASCADE,
            timestamp timestamp with time zone NOT NULL DEFAULT clock_timestamp(),
            state character varying NOT NULL,
            message character varying,
            result jsonb,
            start_time timestamp with time zone,
            serialized_state jsonb NOT NULL,
            created timestamp with time zone NOT NULL DEFAULT now(),
            updated timestamp with time zone NOT NULL DEFAULT now(),
            version integer DEFAULT 0
        );

        -- Indices -------------------------------------------------------

        CREATE INDEX ix_flow_run_state_state ON flow_run_state(state text_ops);
        CREATE INDEX ix_flow_run_state_flow_run_id ON flow_run_state(flow_run_id uuid_ops);
        CREATE INDEX ix_flow_run_state_timestamp ON flow_run_state(timestamp timestamptz_ops);

        """
    )

    op.execute(
        """

        -- Table Definition ----------------------------------------------

        CREATE TABLE task (
            id uuid DEFAULT gen_random_uuid() PRIMARY KEY,
            created timestamp with time zone NOT NULL DEFAULT now(),
            flow_id uuid NOT NULL REFERENCES flow(id) ON DELETE CASCADE,
            name character varying,
            slug character varying,
            description character varying,
            type character varying,
            max_retries integer,
            retry_delay interval,
            trigger character varying,
            mapped boolean NOT NULL DEFAULT false,
            auto_generated boolean NOT NULL DEFAULT false,
            cache_key character varying,
            is_root_task boolean NOT NULL DEFAULT false,
            is_terminal_task boolean NOT NULL DEFAULT false,
            is_reference_task boolean NOT NULL DEFAULT false,
            tags jsonb NOT NULL DEFAULT '[]'::jsonb,
            updated timestamp with time zone NOT NULL DEFAULT now(),
            CONSTRAINT task_flow_id_slug_key UNIQUE (flow_id, slug)
        );

        -- Indices -------------------------------------------------------

        CREATE INDEX ix_task_flow_id ON task(flow_id uuid_ops);
        CREATE INDEX ix_task_name ON task USING GIN (name gin_trgm_ops);
        CREATE INDEX ix_task_tags ON task USING GIN (tags jsonb_ops);

        """
    )

    op.execute(
        """
        -- Table Definition ----------------------------------------------

        CREATE TABLE task_run (
            id uuid DEFAULT gen_random_uuid() PRIMARY KEY,
            created timestamp with time zone NOT NULL DEFAULT now(),
            flow_run_id uuid NOT NULL REFERENCES flow_run(id) ON DELETE CASCADE,
            task_id uuid NOT NULL REFERENCES task(id) ON DELETE CASCADE,
            map_index integer NOT NULL DEFAULT '-1'::integer,
            version integer NOT NULL DEFAULT 0,
            heartbeat timestamp with time zone,
            start_time timestamp with time zone,
            end_time timestamp with time zone,
            duration interval,
            run_count integer NOT NULL DEFAULT 0,
            state character varying,
            state_timestamp timestamp with time zone,
            state_message character varying,
            state_result jsonb,
            state_start_time timestamp with time zone,
            serialized_state jsonb,
            cache_key character varying,
            updated timestamp with time zone NOT NULL DEFAULT now(),
            CONSTRAINT task_run_unique_identifier_key UNIQUE (flow_run_id, task_id, map_index)
        );

        -- Indices -------------------------------------------------------

        CREATE INDEX ix_task_run_heartbeat ON task_run(heartbeat timestamptz_ops);
        CREATE INDEX ix_task_run_state ON task_run(state text_ops);
        CREATE INDEX ix_task_run_flow_run_id ON task_run(flow_run_id uuid_ops);
        CREATE INDEX ix_task_run_task_id ON task_run(task_id uuid_ops);
        CREATE INDEX ix_task_run_cache_key ON task_run(cache_key text_ops);
        
        """
    )

    op.execute(
        """
        -- Table Definition ----------------------------------------------

        CREATE TABLE task_run_state (
            id uuid DEFAULT gen_random_uuid() PRIMARY KEY,
            task_run_id uuid NOT NULL REFERENCES task_run(id) ON DELETE CASCADE,
            timestamp timestamp with time zone NOT NULL DEFAULT clock_timestamp(),
            state character varying NOT NULL,
            message character varying,
            result jsonb,
            start_time timestamp with time zone,
            serialized_state jsonb NOT NULL,
            created timestamp with time zone NOT NULL DEFAULT now(),
            updated timestamp with time zone NOT NULL DEFAULT now(),
            version integer DEFAULT 0
        );

        -- Indices -------------------------------------------------------

        CREATE INDEX ix_task_run_state_timestamp ON task_run_state(timestamp timestamptz_ops);
        CREATE INDEX ix_task_run_state_task_run_id ON task_run_state(task_run_id uuid_ops);
        CREATE INDEX ix_task_run_state_state ON task_run_state(state text_ops);

        """
    )

    op.execute(
        """

        -- Table Definition ----------------------------------------------

        CREATE TABLE schedule (
            id uuid DEFAULT gen_random_uuid() PRIMARY KEY,
            created timestamp with time zone NOT NULL DEFAULT now(),
            flow_id uuid NOT NULL REFERENCES flow(id) ON DELETE CASCADE,
            schedule jsonb NOT NULL,
            active boolean NOT NULL DEFAULT false,
            schedule_start timestamp with time zone,
            schedule_end timestamp with time zone,
            last_checked timestamp with time zone,
            last_scheduled_run_time timestamp with time zone,
            updated timestamp with time zone NOT NULL DEFAULT now()
        );

        -- Indices -------------------------------------------------------

        CREATE INDEX ix_schedule_flow_id ON schedule(flow_id uuid_ops);
        CREATE INDEX ix_schedule_schedule_end ON schedule(schedule_end timestamptz_ops);
        CREATE INDEX ix_schedule_last_scheduled_run_time ON schedule(last_scheduled_run_time timestamptz_ops);
        CREATE INDEX ix_schedule_last_checked ON schedule(last_checked timestamptz_ops);
        CREATE INDEX ix_schedule_schedule_start ON schedule(schedule_start timestamptz_ops);
        CREATE INDEX ix_schedule_active ON schedule(active bool_ops);

        """
    )
    op.execute(
        """

        -- Table Definition ----------------------------------------------

        CREATE TABLE edge (
            id uuid DEFAULT gen_random_uuid() PRIMARY KEY,
            created timestamp with time zone NOT NULL DEFAULT now(),
            flow_id uuid NOT NULL REFERENCES flow(id) ON DELETE CASCADE,
            upstream_task_id uuid NOT NULL REFERENCES task(id) ON DELETE CASCADE,
            downstream_task_id uuid NOT NULL REFERENCES task(id) ON DELETE CASCADE,
            key character varying,
            mapped boolean NOT NULL DEFAULT false,
            updated timestamp with time zone NOT NULL DEFAULT now()
        );

        -- Indices -------------------------------------------------------

        CREATE INDEX ix_edge_upstream_task_id ON edge(upstream_task_id uuid_ops);
        CREATE INDEX ix_edge_downstream_task_id ON edge(downstream_task_id uuid_ops);
        CREATE INDEX ix_edge_flow_id ON edge(flow_id uuid_ops);
        
        """
    )
    op.execute(
        """
        
        -- Table Definition ----------------------------------------------

        CREATE TABLE log (
            flow_run_id uuid REFERENCES flow_run(id) ON DELETE CASCADE,
            task_run_id uuid REFERENCES task_run(id) ON DELETE CASCADE,
            timestamp timestamp with time zone NOT NULL DEFAULT '2020-03-18 13:42:43.118776+00'::timestamp with time zone,
            name character varying,
            level character varying,
            message character varying,
            info jsonb,
            id uuid NOT NULL DEFAULT gen_random_uuid(),
            created timestamp with time zone NOT NULL DEFAULT now(),
            updated timestamp with time zone NOT NULL DEFAULT now()
        );

        -- Indices -------------------------------------------------------

        CREATE INDEX ix_log_flow_run_id ON log(flow_run_id uuid_ops);
        CREATE INDEX ix_log_task_run_id ON log(task_run_id uuid_ops);
        CREATE INDEX ix_log_timestamp ON log(timestamp timestamptz_ops DESC);

        """
    )

    # --------------------------------------------------------------------------
    # Circular relationships
    # --------------------------------------------------------------------------
    op.add_column(
        "flow_run",
        sa.Column(
            "state_id", UUID, sa.ForeignKey("flow_run_state.id", ondelete="SET NULL")
        ),
    )

    op.add_column(
        "task_run",
        sa.Column(
            "state_id", UUID, sa.ForeignKey("task_run_state.id", ondelete="SET NULL")
        ),
    )

    op.create_index("ix_flow_run__state_id", "flow_run", ["state_id"])

    op.create_index("ix_task_run__state_id", "task_run", ["state_id"])

    op.execute("CREATE SCHEMA utility;")
    op.create_table(
        "traversal",
        sa.Column("task_id", UUID),
        sa.Column("depth", sa.Integer),
        schema="utility",
    )

    op.execute(
        """
        CREATE FUNCTION utility.downstream_tasks(start_task_ids UUID[], depth_limit integer default 50)
        RETURNS SETOF utility.traversal AS

        $$
        with recursive traverse(task_id, depth) AS (
            SELECT
                -- a task id
                edge.upstream_task_id,

                -- the depth
                0

            FROM edge

            -- the starting point
            WHERE edge.upstream_task_id = ANY(start_task_ids)

            UNION

            SELECT

                -- a new task
                edge.downstream_task_id,

                -- increment the depth
                traverse.depth + 1

            FROM traverse
            INNER JOIN edge
            ON
                edge.upstream_task_id = traverse.task_id
            WHERE

                -- limit traversal depth to the lesser of 50 or the depth_limit
                traverse.depth < 50
                AND traverse.depth < depth_limit
            )
        SELECT
            task_id,
            MAX(traverse.depth) as depth
        FROM traverse

        -- group by task_id to remove duplicate observations
        GROUP BY task_id

        -- sort by the greatest depth at which each task was seen
        ORDER BY MAX(traverse.depth)

        $$ LANGUAGE sql STABLE;
    """
    )
    op.execute(
        """
        CREATE FUNCTION utility.upstream_tasks(start_task_ids UUID[], depth_limit integer default 50)
        RETURNS SETOF utility.traversal AS

        $$
        with recursive traverse(task_id, depth) AS (
            SELECT

                -- a task id
                edge.downstream_task_id,

                -- the depth
                0

            FROM edge

            -- the starting point
            WHERE edge.downstream_task_id = ANY(start_task_ids)

            UNION

            SELECT

                -- a new task
                edge.upstream_task_id,

                -- increment the depth
                traverse.depth + 1

            FROM traverse
            INNER JOIN edge
            ON
                edge.downstream_task_id = traverse.task_id
            WHERE

                -- limit traversal depth to the lesser of 50 or the depth_limit
                traverse.depth < 50
                AND traverse.depth < depth_limit
            )
        SELECT
            task_id,
            MAX(traverse.depth) as depth
        FROM traverse

        -- group by task_id to remove duplicate observations
        GROUP BY task_id

        -- sort by the greatest depth at which each task was seen
        ORDER BY MAX(traverse.depth)

        $$ LANGUAGE sql STABLE;
    """
    )
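
# A usage sketch for the traversal helpers created above; run against the
# same database (the UUID literal is illustrative only):
def _example_downstream_query(conn):
    return conn.execute(
        "SELECT task_id, depth FROM utility.downstream_tasks("
        "ARRAY['00000000-0000-0000-0000-000000000000']::uuid[], 10)"
    ).fetchall()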
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('nro_names_sync_job_detail')
    op.drop_table('nro_names_sync_job')

    op.execute(sa.schema.DropSequence(sa.schema.Sequence("nro_job_seq")))
Example n. 47
def upgrade():
    # BEWARE: be prepared for some really spaghetti code. To deal with SQLite
    # limitations in Alembic we coded some workarounds.

    # Migrations are supported starting from Cuckoo 0.6 and Cuckoo 1.0; we
    # need a way to figure out which release we are starting from, because
    # both schemas are missing Alembic release versioning.
    # We check for the machines_tags table to distinguish Cuckoo 0.6 from 1.0.
    conn = op.get_bind()

    if conn.engine.dialect.has_table(conn.engine.connect(), "machines_tags"):
        # If this table exists we are on Cuckoo 1.0 or above.
        # So skip SQL migration.
        pass
    else:
        # We are on Cuckoo < 1.0, hopefully 0.6.
        # So run SQL migration.

        # Create table used by Tag.
        op.create_table(
            "tags",
            sa.Column("id", sa.Integer(), primary_key=True),
            sa.Column("name",
                      sa.String(length=255),
                      nullable=False,
                      unique=True),
        )

        # Create secondary table used in association Machine - Tag.
        op.create_table(
            "machines_tags",
            sa.Column("machine_id", sa.Integer, sa.ForeignKey("machines.id")),
            sa.Column("tag_id", sa.Integer, sa.ForeignKey("tags.id")),
        )

        # Add columns to Machine.
        op.add_column(
            "machines",
            sa.Column("interface", sa.String(length=255), nullable=True))
        op.add_column(
            "machines",
            sa.Column("snapshot", sa.String(length=255), nullable=True))
        # TODO: change default value, be aware sqlite doesn't support that kind of ALTER statement.
        op.add_column(
            "machines",
            sa.Column("resultserver_ip",
                      sa.String(length=255),
                      server_default="192.168.56.1",
                      nullable=False))
        # TODO: change default value, be aware sqlite doesn't support that kind of ALTER statement.
        op.add_column(
            "machines",
            sa.Column("resultserver_port",
                      sa.String(length=255),
                      server_default="2042",
                      nullable=False))

        # Deal with Alembic limitations.
        # Alembic is so ORMish that it was impossible to write code which
        # works across different DBMSs, so we branch on the driver.
        if conn.engine.driver == "psycopg2":
            # We don"t provide a default value and leave the column as nullable because o further data migration.
            op.add_column(
                "tasks",
                sa.Column("clock", sa.DateTime(timezone=False), nullable=True))
            # NOTE: The clock column was just added, so force it to added_on
            # for old analyses.
            conn.execute("update tasks set clock=added_on")
            # Add the not null constraint.
            op.alter_column("tasks",
                            "clock",
                            nullable=False,
                            existing_nullable=True)
            # Alter the status ENUM.
            # Raw SQL is used here because Alembic doesn't deal well with
            # alter_column on ENUM types.
            # Commit first: PostgreSQL doesn't allow ALTER TYPE ... ADD VALUE
            # inside a transaction block.
            op.execute('COMMIT')
            conn.execute("ALTER TYPE status_type ADD VALUE 'completed'")
            conn.execute("ALTER TYPE status_type ADD VALUE 'reported'")
            conn.execute("ALTER TYPE status_type ADD VALUE 'recovered'")
            conn.execute("ALTER TYPE status_type ADD VALUE 'running'")
            conn.execute(
                "ALTER TYPE status_type RENAME ATTRIBUTE success TO completed")
            conn.execute(
                "ALTER TYPE status_type DROP ATTRIBUTE IF EXISTS failure")
        elif conn.engine.driver == "mysqldb":
            # We don"t provide a default value and leave the column as nullable because o further data migration.
            op.add_column(
                "tasks",
                sa.Column("clock", sa.DateTime(timezone=False), nullable=True))
            # NOTE: The clock column was just added, so force it to added_on
            # for old analyses.
            conn.execute("update tasks set clock=added_on")
            # Add the not null constraint.
            op.alter_column("tasks",
                            "clock",
                            nullable=False,
                            existing_nullable=True,
                            existing_type=sa.DateTime(timezone=False))
            # NOTE: To work around limitations in Alembic and MySQL's ALTER
            # statement (an item cannot be removed from an ENUM), read all
            # rows, rebuild the table, and re-insert them.
            # Read data.
            tasks_data = []
            old_tasks = conn.execute(
                "select id, target, category, timeout, priority, custom, machine, package, options, platform, memory, enforce_timeout, added_on, started_on, completed_on, status, sample_id from tasks"
            ).fetchall()
            columns = ("id", "target", "category", "timeout", "priority",
                       "custom", "machine", "package", "options", "platform",
                       "memory", "enforce_timeout", "added_on", "started_on",
                       "completed_on", "status", "sample_id")
            for item in old_tasks:
                # Map each row onto the column names selected above.
                d = dict(zip(columns, item))
                # Coerce timestamps that may have been stored as strings.
                for key in ("added_on", "started_on", "completed_on"):
                    if not isinstance(d[key], datetime):
                        d[key] = parse(d[key])

                # Force clock.
                # NOTE: The clock column was just added, so force it to
                # added_on for old analyses.
                d["clock"] = d["added_on"]
                # Enum migration: "success" isn't a valid state any more.
                if d["status"] == "success":
                    d["status"] = "completed"
                tasks_data.append(d)

            # Rename original table.
            op.rename_table("tasks", "old_tasks")
            # Drop old table.
            op.drop_table("old_tasks")
            # Drop old Enum.
            sa.Enum(name="status_type").drop(op.get_bind(), checkfirst=False)
            # Create new table with 1.0 schema.
            op.create_table(
                "tasks", sa.Column("id", sa.Integer(), nullable=False),
                sa.Column("target", sa.String(length=255), nullable=False),
                sa.Column("category", sa.String(length=255), nullable=False),
                sa.Column("timeout",
                          sa.Integer(),
                          server_default="0",
                          nullable=False),
                sa.Column("priority",
                          sa.Integer(),
                          server_default="1",
                          nullable=False),
                sa.Column("custom", sa.String(length=255), nullable=True),
                sa.Column("machine", sa.String(length=255), nullable=True),
                sa.Column("package", sa.String(length=255), nullable=True),
                sa.Column("options", sa.String(length=255), nullable=True),
                sa.Column("platform", sa.String(length=255), nullable=True),
                sa.Column("memory",
                          sa.Boolean(),
                          nullable=False,
                          default=False),
                sa.Column("enforce_timeout",
                          sa.Boolean(),
                          nullable=False,
                          default=False),
                sa.Column("clock",
                          sa.DateTime(timezone=False),
                          server_default=sa.func.now(),
                          nullable=False),
                sa.Column("added_on",
                          sa.DateTime(timezone=False),
                          nullable=False),
                sa.Column("started_on",
                          sa.DateTime(timezone=False),
                          nullable=True),
                sa.Column("completed_on",
                          sa.DateTime(timezone=False),
                          nullable=True),
                sa.Column("status",
                          sa.Enum("pending",
                                  "running",
                                  "completed",
                                  "reported",
                                  "recovered",
                                  name="status_type"),
                          server_default="pending",
                          nullable=False),
                sa.Column("sample_id",
                          sa.Integer,
                          sa.ForeignKey("samples.id"),
                          nullable=True), sa.PrimaryKeyConstraint("id"))

            # Insert data.
            op.bulk_insert(db.Task.__table__, tasks_data)
        elif conn.engine.driver == "pysqlite":
            # Edit the task status enumeration in Task.
            # NOTE: To work around SQLite limitations we have to rebuild the
            # table: read the data, create the new schema, and copy it back.
            # Read data.
            tasks_data = []
            old_tasks = conn.execute(
                "select id, target, category, timeout, priority, custom, machine, package, options, platform, memory, enforce_timeout, added_on, started_on, completed_on, status, sample_id from tasks"
            ).fetchall()
            columns = ("id", "target", "category", "timeout", "priority",
                       "custom", "machine", "package", "options", "platform",
                       "memory", "enforce_timeout", "added_on", "started_on",
                       "completed_on", "status", "sample_id")
            for item in old_tasks:
                # Map each row onto the column names selected above.
                d = dict(zip(columns, item))
                # Coerce timestamps that may have been stored as strings.
                for key in ("added_on", "started_on", "completed_on"):
                    if not isinstance(d[key], datetime):
                        d[key] = parse(d[key])

                # Force clock.
                # NOTE: The clock column was just added, so force it to
                # added_on for old analyses.
                d["clock"] = d["added_on"]
                # Enum migration: "success" isn't a valid state any more.
                if d["status"] == "success":
                    d["status"] = "completed"
                tasks_data.append(d)

            # Rename original table.
            op.rename_table("tasks", "old_tasks")
            # Drop old table.
            op.drop_table("old_tasks")
            # Drop old Enum.
            sa.Enum(name="status_type").drop(op.get_bind(), checkfirst=False)
            # Create new table with 1.0 schema.
            op.create_table(
                "tasks", sa.Column("id", sa.Integer(), nullable=False),
                sa.Column("target", sa.Text(), nullable=False),
                sa.Column("category", sa.String(length=255), nullable=False),
                sa.Column("timeout",
                          sa.Integer(),
                          server_default="0",
                          nullable=False),
                sa.Column("priority",
                          sa.Integer(),
                          server_default="1",
                          nullable=False),
                sa.Column("custom", sa.String(length=255), nullable=True),
                sa.Column("machine", sa.String(length=255), nullable=True),
                sa.Column("package", sa.String(length=255), nullable=True),
                sa.Column("options", sa.String(length=255), nullable=True),
                sa.Column("platform", sa.String(length=255), nullable=True),
                sa.Column("memory",
                          sa.Boolean(),
                          nullable=False,
                          default=False),
                sa.Column("enforce_timeout",
                          sa.Boolean(),
                          nullable=False,
                          default=False),
                sa.Column("clock",
                          sa.DateTime(timezone=False),
                          server_default=sa.func.now(),
                          nullable=False),
                sa.Column("added_on",
                          sa.DateTime(timezone=False),
                          nullable=False),
                sa.Column("started_on",
                          sa.DateTime(timezone=False),
                          nullable=True),
                sa.Column("completed_on",
                          sa.DateTime(timezone=False),
                          nullable=True),
                sa.Column("status",
                          sa.Enum("pending",
                                  "running",
                                  "completed",
                                  "reported",
                                  "recovered",
                                  name="status_type"),
                          server_default="pending",
                          nullable=False),
                sa.Column("sample_id",
                          sa.Integer,
                          sa.ForeignKey("samples.id"),
                          nullable=True), sa.PrimaryKeyConstraint("id"))

            # Insert data.
            op.bulk_insert(db.Task.__table__, tasks_data)

    # Migrate mongo.
    mongo_upgrade()
Example n. 48
def downgrade():
    op.execute('alter table member rename first_name to name;')
    op.drop_column('member', 'last_name')
Example n. 49
def upgrade():
    bind = op.get_bind()
    session = Session(bind=bind)

    # Create organization table
    Organization.__table__.create(bind)

    op.create_index(
        "ix_organization_search_vector",
        "organization",
        ["search_vector"],
        unique=False,
        postgresql_using="gin",
    )

    default_org = Organization(name="default",
                               default=True,
                               description="Default dispatch organization.")
    session.add(default_org)
    session.flush()

    # Create project table
    op.create_table(
        "project",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(), nullable=True),
        sa.Column("default", sa.Boolean(), nullable=True),
        sa.Column("description", sa.String(), nullable=True),
        sa.Column("organization_id", sa.Integer(), nullable=True),
        sa.Column("search_vector", TSVectorType(), nullable=True),
        sa.ForeignKeyConstraint(
            ["organization_id"],
            ["organization.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        "ix_project_search_vector",
        "project",
        ["search_vector"],
        unique=False,
        postgresql_using="gin",
    )

    default_project = Project(
        name="default",
        default=True,
        description="Default dispatch project.",
        organization_id=default_org.id,
    )
    session.add(default_project)
    session.flush()

    # associate users with the default organization
    op.create_table(
        "dispatch_user_organization",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("dispatch_user_id", sa.Integer(), nullable=True),
        sa.Column("organization_id", sa.Integer(), nullable=True),
        sa.Column("role", sa.String(), nullable=True),
        sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(
            ["dispatch_user_id"],
            ["dispatch_user.id"],
        ),
        sa.ForeignKeyConstraint(
            ["organization_id"],
            ["organization.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )

    # associate users with the default project
    op.create_table(
        "dispatch_user_project",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("dispatch_user_id", sa.Integer(), nullable=True),
        sa.Column("project_id", sa.Integer(), nullable=True),
        sa.Column("role", sa.String(), nullable=False),
        sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(
            ["dispatch_user_id"],
            ["dispatch_user.id"],
        ),
        sa.ForeignKeyConstraint(
            ["project_id"],
            ["project.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )

    # associate all users with the current default organization and project
    for u in session.query(DispatchUser).all():
        # we make all previous admins organization admins
        organization_role = None
        if u.role == "Admin":
            organization_role = "Owner"

        session.add(
            DispatchUserOrganization(dispatch_user_id=u.id,
                                     organization_id=default_org.id,
                                     role=organization_role))

        # everybody is a regular project member for now
        session.add(
            DispatchUserProject(dispatch_user_id=u.id,
                                project_id=default_project.id))

    # we don't need role anymore
    op.drop_column("dispatch_user", "role")
    session.flush()

    # associate resources with default project
    op.add_column("definition",
                  sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "definition",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(f"update definition set project_id = {default_project.id}")

    op.add_column("document",
                  sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "document",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(f"update document set project_id = {default_project.id}")

    op.add_column("incident",
                  sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "incident",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(f"update incident set project_id = {default_project.id}")

    op.add_column("incident_cost",
                  sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "incident_cost",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(f"update incident_cost set project_id = {default_project.id}")

    op.add_column("incident_cost_type",
                  sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "incident_cost_type",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(
        f"update incident_cost_type set project_id = {default_project.id}")

    op.add_column("incident_priority",
                  sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "incident_priority",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(
        f"update incident_priority set project_id = {default_project.id}")

    op.add_column("incident_type",
                  sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "incident_type",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(f"update incident_type set project_id = {default_project.id}")

    op.add_column("individual_contact",
                  sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "individual_contact",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(
        f"update individual_contact set project_id = {default_project.id}")

    op.add_column("notification",
                  sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "notification",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(f"update notification set project_id = {default_project.id}")

    op.add_column("plugin", sa.Column("project_id",
                                      sa.Integer(),
                                      nullable=True))
    op.create_foreign_key(None,
                          "plugin",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(f"update plugin set project_id = {default_project.id}")

    op.add_column("search_filter",
                  sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "search_filter",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(f"update search_filter set project_id = {default_project.id}")

    op.add_column("service",
                  sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "service",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(f"update service set project_id = {default_project.id}")

    op.add_column("tag", sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "tag",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(f"update tag set project_id = {default_project.id}")

    op.add_column("tag_type",
                  sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "tag_type",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(f"update tag_type set project_id = {default_project.id}")

    op.add_column("team_contact",
                  sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "team_contact",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(f"update team_contact set project_id = {default_project.id}")

    op.add_column("term", sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "term",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(f"update term set project_id = {default_project.id}")

    op.add_column("workflow",
                  sa.Column("project_id", sa.Integer(), nullable=True))
    op.create_foreign_key(None,
                          "workflow",
                          "project", ["project_id"], ["id"],
                          ondelete="CASCADE")
    op.execute(f"update workflow set project_id = {default_project.id}")
Example n. 50
def upgrade():

    op.get_bind()
    op.execute(drop_all_dashboard_helper_views)

    # Text values were converted from department_source_text to a
    # department_source_id in revision addb446d684c. There are a few pages
    # where the text value was not mapped to an org; these updates backfill
    # older page versions with a missing department_source_id to match the
    # most recent published version of the page.

    op.execute("""
             UPDATE page SET department_source_id = 'EO1216'
             WHERE department_source_text = 'Department for Education and Education and Skills Funding Agency'
             AND department_source_id IS NULL;

             UPDATE page SET department_source_id = 'D1198'
             WHERE department_source_text = 'Start Up Loans Company'
             AND department_source_id IS NULL;
        """)
    op.drop_column("page", "department_source_text")

    op.execute(latest_published_pages_view)
    op.execute(pages_by_geography_view)
    op.execute(ethnic_groups_by_dimension_view)
    op.execute(categorisations_by_dimension)
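    # The helper views are dropped up front and recreated at the end because
    # PostgreSQL will not drop a column that a view still references.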
def upgrade():
    op.execute(
        """UPDATE "user" SET name = REPLACE(name, '`', '') WHERE name like '%`%'"""
    )
Example n. 52
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'artists', sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(), nullable=True),
        sa.Column('city', sa.String(length=120), nullable=True),
        sa.Column('state', sa.String(length=120), nullable=True),
        sa.Column('phone', sa.String(length=120), nullable=True),
        sa.Column('image_link', sa.String(length=500), nullable=True),
        sa.Column('website', sa.String(length=120), nullable=True),
        sa.Column('facebook_link', sa.String(length=120), nullable=True),
        sa.Column('seeking_venue', sa.Boolean(), nullable=False),
        sa.Column('seeking_description', sa.String(length=120), nullable=True),
        sa.PrimaryKeyConstraint('id'))
    op.create_table('genres', sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('name', sa.String(), nullable=True),
                    sa.PrimaryKeyConstraint('id'))
    op.create_table(
        'venues', sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(), nullable=True),
        sa.Column('city', sa.String(length=120), nullable=True),
        sa.Column('state', sa.String(length=120), nullable=True),
        sa.Column('address', sa.String(length=120), nullable=True),
        sa.Column('phone', sa.String(length=120), nullable=True),
        sa.Column('image_link', sa.String(length=500), nullable=True),
        sa.Column('website', sa.String(length=120), nullable=True),
        sa.Column('facebook_link', sa.String(length=120), nullable=True),
        sa.Column('seeking_talent', sa.Boolean(), nullable=False),
        sa.Column('seeking_description', sa.String(length=120), nullable=True),
        sa.PrimaryKeyConstraint('id'))
    op.create_table('artists_genres',
                    sa.Column('artist_id', sa.Integer(), nullable=False),
                    sa.Column('genre_id', sa.Integer(), nullable=False),
                    sa.ForeignKeyConstraint(
                        ['artist_id'],
                        ['artists.id'],
                    ), sa.ForeignKeyConstraint(
                        ['genre_id'],
                        ['genres.id'],
                    ), sa.PrimaryKeyConstraint('artist_id', 'genre_id'))
    op.create_table('genres_venues',
                    sa.Column('genre_id', sa.Integer(), nullable=False),
                    sa.Column('venue_id', sa.Integer(), nullable=False),
                    sa.ForeignKeyConstraint(
                        ['genre_id'],
                        ['genres.id'],
                    ), sa.ForeignKeyConstraint(
                        ['venue_id'],
                        ['venues.id'],
                    ), sa.PrimaryKeyConstraint('genre_id', 'venue_id'))
    op.create_table('shows', sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('artist_id', sa.Integer(), nullable=False),
                    sa.Column('venue_id', sa.Integer(), nullable=False),
                    sa.Column('start_time', sa.DateTime(), nullable=True),
                    sa.ForeignKeyConstraint(
                        ['artist_id'],
                        ['artists.id'],
                    ), sa.ForeignKeyConstraint(
                        ['venue_id'],
                        ['venues.id'],
                    ), sa.PrimaryKeyConstraint('id'))
    # ### end Alembic commands ###

    # Insert genres into the DB.
    op.execute('''
        INSERT INTO genres
        VALUES 
            (1, 'Alternative'),
            (2, 'Blues'),
            (3, 'Classical'),
            (4, 'Country'),
            (5, 'Electronic'),
            (6, 'Folk'),
            (7, 'Funk'),
            (8, 'Hip-Hop'),
            (9, 'Heavy Metal'),
            (10, 'Instrumental'),
            (11, 'Jazz'),
            (12, 'Musical Theatre'),
            (13, 'Pop'),
            (14, 'Punk'),
            (15, 'R&B'),
            (16, 'Reggae'),
            (17, 'Rock n Roll'),
            (18, 'Soul'),
            (19, 'Other')
    ''')
Example n. 53
def upgrade():
    op.add_column(
        "users",
        sa.Column("registered", sa.DateTime(timezone=True), nullable=True))
    op.execute("update users set registered = current_timestamp")
    op.alter_column("users", "registered", nullable=False)
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('payment', 'paid_amount', new_column_name='amount')
    op.drop_column('payment', 'invoice_amount')
    op.execute("delete from invoice_status_code where code='PARTIAL_PAID'")
def upgrade():
    op.execute("DELETE FROM events;")
    op.execute("ALTER TABLE events DROP COLUMN event_metadata;")
    op.execute("ALTER TABLE events ADD COLUMN event_metadata TEXT;")
Example n. 56
def downgrade():
    new_values = (
        "WEBAPP_SIGNUP",
        "DEGRESSIVE_REIMBURSEMENT_RATE",
        "QR_CODE",
        "FULL_OFFERS_SEARCH_WITH_OFFERER_AND_VENUE",
        "SEARCH_ALGOLIA",
        "SEARCH_LEGACY",
        "BENEFICIARIES_IMPORT",
        "SYNCHRONIZE_ALGOLIA",
        "SYNCHRONIZE_ALLOCINE",
        "SYNCHRONIZE_BANK_INFORMATION",
        "SYNCHRONIZE_LIBRAIRES",
        "SYNCHRONIZE_TITELIVE",
        "SYNCHRONIZE_TITELIVE_PRODUCTS",
        "SYNCHRONIZE_TITELIVE_PRODUCTS_DESCRIPTION",
        "SYNCHRONIZE_TITELIVE_PRODUCTS_THUMBS",
        "UPDATE_DISCOVERY_VIEW",
        "UPDATE_BOOKING_USED",
        "RECOMMENDATIONS_WITH_DISCOVERY_VIEW",
        "RECOMMENDATIONS_WITH_DIGITAL_FIRST",
        "RECOMMENDATIONS_WITH_GEOLOCATION",
        "NEW_RIBS_UPLOAD",
    )
    previous_values = (
        "WEBAPP_SIGNUP",
        "DEGRESSIVE_REIMBURSEMENT_RATE",
        "QR_CODE",
        "FULL_OFFERS_SEARCH_WITH_OFFERER_AND_VENUE",
        "SEARCH_ALGOLIA",
        "SEARCH_LEGACY",
        "BENEFICIARIES_IMPORT",
        "SYNCHRONIZE_ALGOLIA",
        "SYNCHRONIZE_ALLOCINE",
        "SYNCHRONIZE_BANK_INFORMATION",
        "SYNCHRONIZE_LIBRAIRES",
        "SYNCHRONIZE_TITELIVE",
        "SYNCHRONIZE_TITELIVE_PRODUCTS",
        "SYNCHRONIZE_TITELIVE_PRODUCTS_DESCRIPTION",
        "SYNCHRONIZE_TITELIVE_PRODUCTS_THUMBS",
        "UPDATE_DISCOVERY_VIEW",
        "UPDATE_BOOKING_USED",
        "RECOMMENDATIONS_WITH_DISCOVERY_VIEW",
        "RECOMMENDATIONS_WITH_DIGITAL_FIRST",
        "RECOMMENDATIONS_WITH_GEOLOCATION",
        "NEW_RIBS_UPLOAD",
        "SAVE_SEEN_OFFERS",
    )

    previous_enum = sa.Enum(*previous_values, name="featuretoggle")
    new_enum = sa.Enum(*new_values, name="featuretoggle")
    temporary_enum = sa.Enum(*new_values, name="tmp_featuretoggle")

    op.execute("DELETE FROM feature WHERE name = 'SAVE_SEEN_OFFERS'")
    temporary_enum.create(op.get_bind(), checkfirst=False)
    op.execute("ALTER TABLE feature ALTER COLUMN name TYPE tmp_featuretoggle"
               " USING name::text::tmp_featuretoggle")
    previous_enum.drop(op.get_bind(), checkfirst=False)
    new_enum.create(op.get_bind(), checkfirst=False)
    op.execute("ALTER TABLE feature ALTER COLUMN name TYPE featuretoggle"
               " USING name::text::featuretoggle")
    temporary_enum.drop(op.get_bind(), checkfirst=False)
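    # This is the usual PostgreSQL recipe for removing an enum value: cast
    # the column to a temporary enum, drop and recreate the real enum without
    # the removed value, cast back, then drop the temporary type.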
def upgrade():
    t = table('imapuid', column('deleted_at', sa.DateTime()))

    op.execute(t.delete().where(t.c.deleted_at != None))  # noqa: E711
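    # The "!= None" (rather than "is not None") is deliberate: SQLAlchemy
    # overloads the comparison to emit "deleted_at IS NOT NULL";
    # t.c.deleted_at.isnot(None) would be the lint-friendly equivalent.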
def downgrade():
    op.execute("DELETE FROM events;")
    op.execute("ALTER TABLE events DROP COLUMN event_metadata;")
    op.execute("ALTER TABLE events ADD COLUMN event_metadata JSONB NOT NULL DEFAULT '{}';")
Example n. 59
def upgrade():
    table_merchant = op.create_table(
        'merchant', sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('email', sa.String(255)), sa.Column('name', sa.String(255)),
        sa.Column('description', sa.Text), sa.Column('phone', sa.String(255)),
        sa.Column('zip', sa.String(255)), sa.Column('city', sa.String(255)),
        sa.Column('uf', sa.String(2)), sa.Column('neighborhood',
                                                 sa.String(255)),
        sa.Column('address', sa.String(255)),
        sa.Column('address_extra', sa.String(255)), sa.Column('lat', sa.Float),
        sa.Column('lon', sa.Float),
        sa.Column('max_distance',
                  sa.Integer,
                  server_default=sa.schema.DefaultClause('20')),
        sa.Column('delivery', sa.Boolean),
        sa.Column('status',
                  sa.Integer,
                  server_default=sa.schema.DefaultClause('1')),
        # created_at is supplied by the bulk_insert below; without this
        # column the insert would fail with an unconsumed column name.
        sa.Column('created_at', sa.DateTime),
        sa.PrimaryKeyConstraint("id"))
    op.execute('CREATE EXTENSION IF NOT EXISTS cube')
    op.execute('CREATE EXTENSION IF NOT EXISTS earthdistance')
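    # The cube/earthdistance extensions back radius lookups against lat/lon;
    # a sketch of the kind of query they enable (assumed, not in the
    # original migration):
    #
    #     SELECT id FROM merchant
    #     WHERE earth_distance(ll_to_earth(lat, lon),
    #                          ll_to_earth(:user_lat, :user_lon))
    #           < max_distance * 1000;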

    op.bulk_insert(table_merchant, [{
        'email': '*****@*****.**',
        'name': 'UzaPrint',
        'phone': '5548996415657',
        'lat': -28.4812066,
        'lon': -49.0064517,
        'zip': '88701105',
        'city': 'Tubarão',
        'uf': 'SC',
        'neighborhood': 'Centro',
        'address': 'Av. Marcolino Martins Cabral, 1315',
        'address_extra': 'Praça Shopping',
        'max_distance': 30,
        'delivery': True,
        'created_at': datetime.now()
    }, {
        'email': '*****@*****.**',
        'name': 'HDA Personalizações',
        'phone': '5548991119511',
        'lat': -28.4734123,
        'lon': -49.0130107,
        'zip': '88704400',
        'city': 'Tubarão',
        'uf': 'SC',
        'neighborhood': 'Humaitá',
        'address': 'R. Roberto Zumblick, 822',
        'address_extra': 'Sala 02',
        'max_distance': 30,
        'delivery': False,
        'created_at': datetime.now()
    }, {
        'email': '*****@*****.**',
        'name': 'Marka Produtos Personalizados',
        'phone': '5548991119511',
        'lat': -29.113707,
        'lon': -51.093622,
        'zip': '95060145',
        'city': 'Caxias do Sul',
        'uf': 'RS',
        'neighborhood': 'Ana Rech',
        'address': 'Av. Rio Branco',
        'address_extra': '',
        'max_distance': 30,
        'delivery': False,
        'created_at': datetime.now()
    }, {
        'email': '',
        'name': 'Exclusive Personalizados',
        'phone': '5551995093837',
        'lat': -29.7212371,
        'lon': -52.4328594,
        'zip': '96810124',
        'city': 'Santa Cruz do Sul',
        'uf': 'RS',
        'neighborhood': 'Centro',
        'address': 'Rua Venâncio Aires, 1102',
        'address_extra': '',
        'max_distance': 30,
        'delivery': False,
        'created_at': datetime.now()
    }])
Example n. 60
def downgrade_flask_state_sqlite():
    op.create_table(
        "flask_state_host_dg_tmp",
        sa.Column(
            "id",
            mysql.INTEGER(unsigned=True),
            autoincrement=True,
            nullable=False,
        ),
        sa.Column(
            "create_time",
            mysql.DATETIME(),
            server_default=sa.text("(CURRENT_TIMESTAMP)"),
            nullable=True,
        ),
        sa.Column(
            "update_time",
            mysql.DATETIME(),
            server_default=sa.text("(CURRENT_TIMESTAMP)"),
            nullable=True,
        ),
        sa.Column(
            "cpu",
            mysql.FLOAT(unsigned=True),
            server_default=sa.text("0"),
            nullable=True,
        ),
        sa.Column(
            "memory",
            mysql.FLOAT(unsigned=True),
            server_default=sa.text("0"),
            nullable=True,
        ),
        sa.Column(
            "load_avg", sa.String(length=32), server_default="", nullable=True
        ),
        sa.Column(
            "disk_usage",
            mysql.FLOAT(unsigned=True),
            server_default=sa.text("0"),
            nullable=True,
        ),
        sa.Column(
            "boot_seconds",
            mysql.INTEGER(unsigned=True),
            server_default=sa.text("0"),
            nullable=True,
        ),
        sa.Column(
            "ts",
            mysql.BIGINT(unsigned=True),
            server_default=sa.text("0"),
            nullable=True,
        ),
        sa.Column(
            "used_memory",
            mysql.INTEGER(unsigned=True),
            server_default=sa.text("0"),
            nullable=True,
        ),
        sa.Column(
            "used_memory_rss",
            mysql.INTEGER(unsigned=True),
            server_default=sa.text("0"),
            nullable=True,
        ),
        sa.Column(
            "connected_clients",
            mysql.SMALLINT(unsigned=True),
            server_default=sa.text("0"),
            nullable=True,
        ),
        sa.Column(
            "uptime_in_seconds",
            mysql.INTEGER(unsigned=True),
            server_default=sa.text("0"),
            nullable=True,
        ),
        sa.Column(
            "mem_fragmentation_ratio",
            mysql.FLOAT(unsigned=True),
            server_default=sa.text("0"),
            nullable=True,
        ),
        sa.Column(
            "keyspace_hits",
            mysql.INTEGER(unsigned=True),
            server_default=sa.text("0"),
            nullable=True,
        ),
        sa.Column(
            "keyspace_misses",
            mysql.INTEGER(unsigned=True),
            server_default=sa.text("0"),
            nullable=True,
        ),
        sa.Column(
            "hits_ratio",
            mysql.FLOAT(unsigned=True),
            server_default=sa.text("0"),
            nullable=True,
        ),
        sa.Column(
            "delta_hits_ratio",
            mysql.FLOAT(unsigned=True),
            server_default=sa.text("0"),
            nullable=True,
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.execute(
        """
        insert into flask_state_host_dg_tmp (
            id, create_time, update_time, cpu, memory, load_avg, disk_usage,
            boot_seconds, ts, used_memory, used_memory_rss, connected_clients,
            uptime_in_seconds, mem_fragmentation_ratio, keyspace_hits,
            keyspace_misses, hits_ratio, delta_hits_ratio
        )
        select
            id, create_time, update_time, cpu, memory, load_avg, disk_usage,
            boot_seconds, ts, used_memory, used_memory_rss, connected_clients,
            uptime_in_seconds, mem_fragmentation_ratio, keyspace_hits,
            keyspace_misses, hits_ratio, delta_hits_ratio
        from flask_state_host;
        """
    )
    op.drop_table("flask_state_host")
    op.rename_table("flask_state_host_dg_tmp", "flask_state_host")
    op.create_index(
        "idx_host_ts", "flask_state_host", [sa.text("ts DESC")], unique=False
    )
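    # The tmp-table dance above (create the new shape, copy rows, drop the
    # old table, rename, recreate indexes) is the standard SQLite workaround
    # for ALTER operations it does not support, such as dropping columns.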