Example #1
def upgrade():
    op.add_column('answer', sa.Column('practice', sa.Boolean(name='practice'),
        default='0', server_default='0', nullable=False, index=True))

    connection = op.get_bind()

    comparison_example_table = sa.table('comparison_example',
        sa.column('answer1_id', sa.Integer),
        sa.column('answer2_id', sa.Integer),
    )

    answer_table = sa.table('answer',
        sa.column('id', sa.Integer),
        sa.column('practice', sa.Boolean)
    )

    answer_ids = set()
    for comparison_example in connection.execute(comparison_example_table.select()):
        answer_ids.add(comparison_example.answer1_id)
        answer_ids.add(comparison_example.answer2_id)

    answer_ids = list(answer_ids)
    if len(answer_ids) > 0:
        connection.execute(
            answer_table.update().where(
                answer_table.c.id.in_(answer_ids)
            ).values(
                practice=True
            )
        )
Example #2
def upgrade():
    conn = op.get_bind()

    cocols = [
        'created', 'updated', 'active', 'version', 'polymorphic_type',
        'id', 'name',
    ]
    co = sa.table('contribution', *map(sa.column, ['pk'] + cocols))
    chcols = ['pk', 'sortkey']
    ch = sa.table('chapter', *map(sa.column, chcols))

    id_, name = map(sa.bindparam, ['id_', 'name'])

    cowhere = [co.c.id == id_, co.c.name == name]

    insert_co = co.insert(bind=conn).from_select(cocols,
        sa.select([sa.func.now(), sa.func.now(), True, 1, sa.literal('custom'), id_, name])
        .where(~sa.exists().where(sa.or_(*cowhere))))

    co_pk = sa.select([co.c.pk]).where(sa.and_(*cowhere)).as_scalar()
    sortkey = sa.bindparam('sortkey')

    insert_ch = ch.insert(bind=conn).from_select(chcols,
        sa.select([co_pk, sortkey])
        .where(~sa.exists().where(ch.c.pk == co.c.pk).where(sa.or_(*cowhere)))
        .where(sa.exists().where(sa.and_(*cowhere))))

    insert_co.execute(id_=ID, name=NAME)
    insert_ch.execute(id_=ID, name=NAME, sortkey=SORTKEY)
Example #3
def upgrade():
    conn = op.get_bind()

    l = sa.table('language', *map(sa.column, ['pk', 'id', 'name']))
    ccols = ['created', 'updated', 'active', 'id', 'name', 'continent']
    c = sa.table('country', *map(sa.column, ['pk'] + ccols))
    lccols = ['created', 'updated', 'active', 'language_pk', 'country_pk']
    lc = sa.table('countrylanguage', *map(sa.column, lccols))

    lwhere = (l.c.id == sa.bindparam('id_'))

    cid, cname, ccont = map(sa.bindparam, ['cc', 'name', 'continent'])
    cwhere = (c.c.id == cid)

    insert_c = c.insert(bind=conn).from_select(ccols,
        sa.select([sa.func.now(), sa.func.now(), True, cid, cname, ccont])
        .where(~sa.exists().where(cwhere)))

    liwhere = sa.exists()\
        .where(lc.c.language_pk == l.c.pk).where(lwhere)\
        .where(lc.c.country_pk == c.c.pk).where(cwhere)

    unlink_country = lc.delete(bind=conn).where(liwhere)

    l_pk = sa.select([l.c.pk]).where(lwhere).as_scalar()
    c_pk = sa.select([c.c.pk]).where(cwhere).as_scalar()

    link_country = lc.insert(bind=conn).from_select(lccols,
        sa.select([sa.func.now(), sa.func.now(), True, l_pk, c_pk])
        .where(~liwhere))

    insert_c.execute(cc=AFTER, name=NAME, continent=CONTINENT)
    for id_ in IDS:
        unlink_country.execute(id_=id_, cc=BEFORE)
        link_country.execute(id_=id_, cc=AFTER)
Example #4
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    subsc = op.create_table(
        'subscription',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), sa.ForeignKey('user.id'), nullable=False),
        sa.Column('feed_id', sa.Integer(), sa.ForeignKey('feed.id'), nullable=False),
        sa.Column('name', sa.String(length=256), nullable=True),
        sa.Column('tags', sa.String(length=256), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )

    feed = sa.table(
        'feed',
        sa.column('id', sa.Integer()),
        sa.column('name', sa.String()))

    u2f = sa.table(
        'users_to_feeds',
        sa.column('user_id', sa.Integer()),
        sa.column('feed_id', sa.Integer()))

    values = sa.select(
        [u2f.c.user_id, u2f.c.feed_id, feed.c.name]
    ).select_from(
        u2f.join(feed, feed.c.id == u2f.c.feed_id)
    )

    op.execute(subsc.insert().from_select(
        ['user_id', 'feed_id', 'name'], values))

    op.drop_table('users_to_feeds')
Example #5
    def execute(self, connection, filter_values):
        max_date_query = sqlalchemy.select([
            sqlalchemy.func.max(sqlalchemy.column('completed_on')).label('completed_on'),
            sqlalchemy.column('case_id').label('case_id')
        ]).select_from(sqlalchemy.table(self.table_name))

        if self.filters:
            for filter in self.filters:
                max_date_query.append_whereclause(filter.build_expression())

        max_date_query.append_group_by(
            sqlalchemy.column('case_id')
        )

        max_date_subquery = sqlalchemy.alias(max_date_query, 'max_date')

        asha_table = self.get_asha_table_name()
        checklist_query = sqlalchemy.select()
        for column in self.columns:
            checklist_query.append_column(column.build_column())

        checklist_query = checklist_query.where(
            sqlalchemy.literal_column('"{}".case_id'.format(asha_table)) == max_date_subquery.c.case_id
        ).where(
            sqlalchemy.literal_column('"{}".completed_on'.format(asha_table)) == max_date_subquery.c.completed_on
        ).select_from(sqlalchemy.table(asha_table))

        return connection.execute(checklist_query, **filter_values).fetchall()
Example #6
    def test_unconsumed_names_values_dict(self):
        t = table("t", column("x"), column("y"))
        t2 = table("t2", column("q"), column("z"))

        assert_raises_message(
            exc.CompileError,
            "Unconsumed column names: j",
            t.update().values(x=5, j=7).values({t2.c.z: 5}).
            where(t.c.x == t2.c.q).compile,
        )
Example #7
def test_compile_with_one_unnamed_table():
    t = ibis.table([('a', 'string')])
    s = ibis.table([('b', 'string')], name='s')
    join = t.join(s, t.a == s.b)
    result = ibis.sqlite.compile(join)
    sqla_t = sa.table('t0', sa.column('a', sa.String)).alias('t0')
    sqla_s = sa.table('s', sa.column('b', sa.String)).alias('t1')
    sqla_join = sqla_t.join(sqla_s, sqla_t.c.a == sqla_s.c.b)
    expected = sa.select([sqla_t.c.a, sqla_s.c.b]).select_from(sqla_join)
    assert str(result) == str(expected)
Example #8
def upgrade():
    conn = op.get_bind()

    language = sa.table('language', *map(sa.column, ['pk', 'id', 'name', 'updated']))
    lid = sa.bindparam('id_')
    lbefore = sa.bindparam('before')
    update_lang = sa.update(language, bind=conn)\
        .where(sa.and_(
            language.c.id == lid,
            language.c.name == lbefore))\
        .values(updated=sa.func.now(), name=sa.bindparam('after'))

    walslanguage = sa.table('walslanguage', *map(sa.column, ['pk', 'ascii_name']))
    aname = sa.bindparam('ascii_name')
    update_wals = sa.update(walslanguage, bind=conn)\
        .where(sa.exists().where(sa.and_(
            language.c.pk == walslanguage.c.pk,
            language.c.id == lid))\
        .where(walslanguage.c.ascii_name != aname))\
        .values(ascii_name=aname)

    icols = ['created', 'updated', 'active', 'version', 'type', 'description', 'lang', 'name']
    identifier = sa.table('identifier', *map(sa.column, ['pk'] + icols))
    itype, idesc, ilang = (sa.bindparam(*a) for a in [('type', 'name'), ('description', 'other'), ('lang', 'en')])
    iname = sa.bindparam('name')
    iwhere = sa.and_(
        identifier.c.type == itype,
        identifier.c.description == idesc,
        identifier.c.lang == ilang,
        identifier.c.name == iname)
    insert_ident = sa.insert(identifier, bind=conn).from_select(icols,
        sa.select([sa.func.now(), sa.func.now(), True, 1, itype, idesc, ilang, iname])
        .where(~sa.exists().where(iwhere)))

    licols = ['created', 'updated', 'active', 'version', 'language_pk', 'identifier_pk']
    languageidentifier = sa.table('languageidentifier', *map(sa.column, licols))
    l_pk = sa.select([language.c.pk]).where(language.c.id == lid)
    i_pk = sa.select([identifier.c.pk]).where(sa.and_(iwhere))
    insert_lang_ident = sa.insert(languageidentifier, bind=conn).from_select(licols,
        sa.select([sa.func.now(), sa.func.now(), True, 1, l_pk.as_scalar(), i_pk.as_scalar()])
        .where(~sa.exists().where(sa.and_(
            languageidentifier.c.language_pk == l_pk,
            languageidentifier.c.identifier_pk == i_pk))))

    for id_, (before, after, keep) in sorted(ID_BEFORE_AFTER_KEEP.items()):
        update_lang.execute(id_=id_, before=before, after=after)
        update_wals.execute(id_=id_, ascii_name=ascii_name(after))
        if keep:
            insert_ident.execute(name=before)
            insert_lang_ident.execute(id_=id_, name=before)
Example #9
    def test_compare_tables(self):
        is_true(table_a.compare(table_a_2))

        # the "proxy" version compares schema tables on metadata identity
        is_false(table_a.compare(table_a_2, use_proxies=True))

        # same for lower case tables since it compares lower case columns
        # using proxies, which makes it very unlikely to have multiple
        # table() objects with columns that compare equally
        is_false(
            table("a", column("x", Integer), column("q", String)).compare(
                table("a", column("x", Integer), column("q", String)),
                use_proxies=True,
            )
        )
Example #10
def upgrade():
    from sqlalchemy.sql import text
    enum_values_table = sa.table('enum_values',
                                 sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
                                 sa.Column('type_id', sa.Integer(), nullable=True),
                                 sa.Column('code', sa.String(length=32), nullable=True),
                                 sa.Column('display', sa.String(length=64), nullable=False),
                                 )
    res = op.get_bind().execute('SELECT max(id)+1 FROM enum_values')
    results = res.fetchall()
    cm = 14  # fallback starting id if the table is empty (max(id)+1 is NULL)
    for r in results:
        if r[0] is not None:
            cm = r[0]
    op.bulk_insert(enum_values_table, [
        {'id': cm, 'type_id': 1, 'code': 'INVENTORY_TRANSACTION_TYPE', 'display': u'库存变动类型'},
        {'id': cm + 1, 'type_id': 1, 'code': 'RECEIVING_STATUS', 'display': u'收货单状态'},
        {'id': cm + 2, 'type_id': 1, 'code': 'PURCHASE_ORDER_STATUS', 'display': u'采购单状态'},
        {'id': cm + 3, 'type_id': 1, 'code': 'SHIPPING_STATUS', 'display': u'发货单状态'},
        {'id': cm + 4, 'type_id': cm, 'code': 'PURCHASE_IN', 'display': u'采购入库'},
        {'id': cm + 5, 'type_id': cm, 'code': 'SALES_OUT', 'display': u'销售出库'},
        {'id': cm + 6, 'type_id': cm, 'code': 'INVENTORY_DAMAGED', 'display': u'商品损毁'},
        {'id': cm + 7, 'type_id': cm, 'code': 'INVENTORY_LOST', 'display': u'商品丢失'},
        {'id': cm + 8, 'type_id': cm + 1, 'code': 'RECEIVING_DRAFT', 'display': u'收货单草稿'},
        {'id': cm + 9, 'type_id': cm + 1, 'code': 'RECEIVING_COMPLETE', 'display': u'收货单完成'},
        {'id': cm + 10, 'type_id': cm + 2, 'code': 'PURCHASE_ORDER_DRAFT', 'display': u'草稿'},
        {'id': cm + 11, 'type_id': cm + 2, 'code': 'PURCHASE_ORDER_ISSUED', 'display': u'已发出'},
        {'id': cm + 12, 'type_id': cm + 2, 'code': 'PURCHASE_ORDER_PART_RECEIVED', 'display': u'部分收货'},
        {'id': cm + 13, 'type_id': cm + 2, 'code': 'PURCHASE_ORDER_RECEIVED', 'display': u'收货完成'},
        {'id': cm + 14, 'type_id': cm + 3, 'code': 'SHIPPING_COMPLETE', 'display': u'发货完成'},
    ], multiinsert=False)
    op.get_bind().execute(text("ALTER SEQUENCE enum_values_id_seq RESTART WITH " + str(cm + 15) + ";"))
Example #11
    def __init__(self, name, query,
                 column_names=None,
                 temporary=False,
                 view_options=None,
                 check_option=None):
        """DDL Element representing a VIEW

        :param name: The name of the view
        :param query: the query it represents
        :param column_names:
        :param temporary:
        :param view_options: Must be something that can be passed to
            OrderedDict, so a simple dict suffices.
        :param check_option: Must be one of ``None``, ``'local'``,
            ``'cascaded'``.
        """
        self.name = name
        self.query = query
        self.table = table(name)
        self.temporary = temporary
        self.column_names = column_names
        self._init_table_columns()
        if view_options is None:
            view_options = OrderedDict()
        else:
            view_options = OrderedDict(view_options)
        self.view_options = view_options
        if check_option not in (None, 'local', 'cascaded'):
            raise ValueError("check_option must be either None, 'local', or "
                             "'cascaded'")
        if check_option is not None and 'check_option' in view_options:
            raise ValueError('Parameter "check_option" specified more than '
                             'once')
        self.check_option = check_option
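A minimal usage sketch for the DDL element above, assuming the class is named View and that a @compiles handler elsewhere (not shown in this example) renders it to CREATE VIEW; the users table, the view name, and the security_barrier option are made-up values for illustration:

from sqlalchemy import column, select, table

users = table('users', column('id'), column('name'))

view = View(
    'active_names',                          # name of the view
    select([users.c.name]),                  # the query it represents
    temporary=True,
    view_options={'security_barrier': True},
    check_option='local',                    # must be None, 'local', or 'cascaded'
)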
Example #12
def upgrade():
    # step 1, add new uuid column
    op.add_column("comparison", sa.Column("uuid", sa.CHAR(22), nullable=False, server_default=""))

    connection = op.get_bind()
    # step 2, fill in unique uuids (base64 url safe strings)
    comparison_table = sa.table("comparison", sa.Column("id", sa.Integer()), sa.Column("uuid", sa.CHAR(22)))
    for record in connection.execute(comparison_table.select()):
        connection.execute(
            comparison_table.update()
            .where(comparison_table.c.id == record.id)
            .values(uuid=base64.urlsafe_b64encode(uuid.uuid4().bytes).decode("ascii").replace("=", ""))
        )

    # step 3, apply unique constraint on generated table
    with op.batch_alter_table("comparison", naming_convention=convention) as batch_op:
        batch_op.create_unique_constraint("uq_comparison_uuid", ["uuid"])

    # step 4 create xapi_log table
    op.create_table(
        "xapi_log",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("statement", sa.Text(), nullable=True),
        sa.Column("modified_user_id", sa.Integer(), nullable=True),
        sa.Column("modified", sa.DateTime(), nullable=False),
        sa.Column("created_user_id", sa.Integer(), nullable=True),
        sa.Column("created", sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(["created_user_id"], ["user.id"], ondelete="SET NULL"),
        sa.ForeignKeyConstraint(["modified_user_id"], ["user.id"], ondelete="SET NULL"),
        sa.PrimaryKeyConstraint("id"),
        mysql_charset="utf8",
        mysql_collate="utf8_unicode_ci",
        mysql_engine="InnoDB",
    )
Example #13
def upgrade():
    op.create_table(
        u'client_authentication_mode',
        sa.Column(u'name', sa.String(10), primary_key=True),
    )

    # Create temporary table for table data seeding
    insert_table = sa.table(
        u'client_authentication_mode',
        sa.column(u'name', sa.String),
    )

    op.bulk_insert(
        insert_table,
        [
            {'name': constants.CLIENT_AUTH_NONE},
            {'name': constants.CLIENT_AUTH_OPTIONAL},
            {'name': constants.CLIENT_AUTH_MANDATORY}
        ]
    )

    op.add_column(
        u'listener',
        sa.Column(u'client_authentication', sa.String(10),
                  sa.ForeignKey('client_authentication_mode.name'),
                  server_default=constants.CLIENT_AUTH_NONE, nullable=False)
    )
Example #14
def upgrade():
    import os
    import hashlib
    HASH_SIZE = 32  # sha256 -> 32 bytes
    default_password = '******'

    # current users are by default active
    op.add_column('users', sa.Column('_active', sa.Boolean(), nullable=True,
                                     server_default=sa.sql.expression.true()))

    # create the new columns
    op.add_column('users', sa.Column('_salt', sa.Binary(length=HASH_SIZE), nullable=True))
    op.add_column('users', sa.Column('_hashed_password', sa.Binary(length=HASH_SIZE), nullable=True))

    # Store the salt and the hashed default password
    users = sa.table('users', sa.Column('id'),
                     sa.Column('_salt'),
                     sa.Column('_hashed_password'))
    connection = op.get_bind()
    for user in connection.execute(sa.select([sa.column('id')]).select_from(sa.text('users'))):
        if user:
            salt = os.urandom(HASH_SIZE)
            pwd = hashlib.sha256(salt + bytes(default_password, encoding='utf8')).digest()
            connection.execute(
                users.update().where(
                    users.c.id == user.id
                ).values(
                    _salt=salt,
                    _hashed_password=pwd,
                )
            )

    # alter columns to be not nullable
    op.alter_column('users', '_salt', nullable=False)
    op.alter_column('users', '_hashed_password', nullable=False)
Example #15
    def test_legacy_typemap(self):
        table1 = table(
            "mytable",
            column("myid", Integer),
            column("name", String),
            column("description", String),
        )
        with testing.expect_deprecated(
            "The text.typemap parameter is deprecated"
        ):
            t = text(
                "select id, name from user",
                typemap=dict(id=Integer, name=String),
            )

        stmt = select([table1.c.myid]).select_from(
            table1.join(t, table1.c.myid == t.c.id)
        )
        compiled = stmt.compile()
        eq_(
            compiled._create_result_map(),
            {
                "myid": (
                    "myid",
                    (table1.c.myid, "myid", "myid"),
                    table1.c.myid.type,
                )
            },
        )
Example #16
def upgrade():
    op.add_column('writeups', sa.Column('author', sa.Unicode(length=100), nullable=True))
    t_w = sa.table(
        'writeups',
        sa.column('id', sa.Integer),
        sa.column('author', sa.String),
    )
    t_wp = sa.table(
        'writeup_posts',
        sa.column('writeup_id', sa.Integer),
        sa.column('author', sa.String),
        sa.column('index', sa.Integer),
    )
    stmt = sa.select([t_wp.c.author]).where(sa.and_(t_wp.c.writeup_id == t_w.c.id, t_wp.c.index == 1))
    op.execute(t_w.update().values(author=stmt))
    op.alter_column('writeups', 'author', nullable=False)
Example #17
    def test_insert_from_select_dont_mutate_raw_columns(self):
        # test [ticket:3603]
        from sqlalchemy import table

        table_ = table(
            "mytable",
            Column("foo", String),
            Column("bar", String, default="baz"),
        )

        stmt = select([table_.c.foo])
        insert = table_.insert().from_select(["foo"], stmt)

        self.assert_compile(stmt, "SELECT mytable.foo FROM mytable")
        self.assert_compile(
            insert,
            "INSERT INTO mytable (foo, bar) "
            "SELECT mytable.foo, :bar AS anon_1 FROM mytable",
        )
        self.assert_compile(stmt, "SELECT mytable.foo FROM mytable")
        self.assert_compile(
            insert,
            "INSERT INTO mytable (foo, bar) "
            "SELECT mytable.foo, :bar AS anon_1 FROM mytable",
        )
Example #18
def downgrade():
    # FIXME: this adds extraneous commas
    return
    log = sa.table('log', sa.column('type', sa.String), sa.column('msg', sa.String))
    rows = op.get_bind().execute(log.select().where(log.c.type == 'kick')).fetchall()
    values = [{'old_msg': x.msg, 'msg': x.msg.replace(' ', ',', 1)} for x in rows]
    op.get_bind().execute(log.update().where(log.c.msg == sa.bindparam('old_msg')).values(msg=sa.bindparam('msg')), values)
Example #19
    def test_unconsumed_names_kwargs(self):
        t = table("t", column("x"), column("y"))
        assert_raises_message(
            exc.CompileError,
            "Unconsumed column names: z",
            t.insert().values(x=5, z=5).compile,
        )
Example #20
import json

import sqlalchemy
from sqlalchemy import Column, Text
from kafka import KafkaConsumer


def data_to_db():
    # Connect to DB
    db = sqlalchemy.create_engine('postgresql://*****:*****@postgres/mydatabase')
    engine = db.connect()
    meta = sqlalchemy.MetaData(engine)
    meta.reflect(bind=engine)

    # Create/Extend table
    try:
        table = sqlalchemy.Table("jsontable",
                                 meta,
                                 Column('idstr', Text, primary_key=True),
                                 Column('created_at', Text),
                                 Column('author', Text),
                                 Column('text', Text),
                                 Column('urls', Text),
                                 Column('platform', Text),
                                 extend_existing=True)
        table.create(engine)
    except sqlalchemy.exc.ProgrammingError:
        print("Table already exists")

    # Upsert entry
    record = sqlalchemy.table("jsontable",
                              Column('idstr', Text),
                              Column('created_at', Text),
                              Column('author', Text), 
                              Column('text', Text),
                              Column('urls', Text),
                              Column('platform', Text))
    records = 0
    consumer = KafkaConsumer('data', bootstrap_servers=['kafka:9092'])
    for msg in consumer:
        msg = json.loads(msg.value.decode('utf-8'))
        # Get rid of any non-ascii chars
        for k, v in msg.items():
            if isinstance(v, str):
                msg[k] = ''.join([i if ord(i) < 128 else ' ' for i in v])

        # Insert row if not already existing (once per message, not once
        # per field as the original indentation implied)
        try:
            statement = record.insert().values(idstr=msg['idstr'],
                                               created_at=msg['created_at'],
                                               author=msg['author'],
                                               text=msg['text'],
                                               urls=msg['urls'],
                                               platform=msg['platform'])

            engine.execute(statement)
            records += 1
            with open('test.txt', 'a') as f:
                f.write(json.dumps(msg) + '\n')
            print(records)
        except sqlalchemy.exc.IntegrityError:
            continue


    return records
Example #21
def upgrade():
    op.add_column(
        'tag_category', sa.Column('default', sa.Boolean(), nullable=True))
    op.execute(
        sa.table('tag_category', sa.column('default'))
            .update()
            .values(default=False))
    op.alter_column('tag_category', 'default', nullable=False)
Example #22
def upgrade():
    log = sa.table('log', sa.column('type', sa.String), sa.column('msg', sa.String))
    rows = op.get_bind().execute(log.select().where(log.c.type == 'kick').where(log.c.msg.like('%,%'))).fetchall()
    rows = [x for x in rows if ',' in x.msg and x.msg.find(',') < x.msg.find(' ')]
    if not rows:
        return
    values = [{'old_msg': x.msg, 'msg': x.msg.replace(',', ' ', 1)} for x in rows]
    op.get_bind().execute(log.update().where(log.c.msg == sa.bindparam('old_msg')).values(msg=sa.bindparam('msg')), values)
Example #23
def upgrade():
    for table in tables:
        op.add_column(table, sa.Column('version', sa.Integer(), nullable=True))
        op.execute(
            sa.table(table, sa.column('version'))
            .update()
            .values(version=1))
        op.alter_column(table, 'version', nullable=False)
Example #24
def upgrade():
    cols = ['id'] + sorted({name for fields in ID_BEFORE_AFTER.values() for name in fields})
    source = sa.table('source', *map(sa.column, cols))
    for id_, fields in sorted(ID_BEFORE_AFTER.items()):
        update = sa.update(source)\
            .where(source.c.id == id_)\
            .where(sa.and_(source.c[f] == before for f, (before, _) in fields.items()))\
            .values({f: after for f, (_, after) in fields.items()})
        op.execute(update)
Example #25
    def test_unconsumed_names_kwargs_w_keys(self):
        t = table("t", column("x"), column("y"))

        assert_raises_message(
            exc.CompileError,
            "Unconsumed column names: j",
            t.update().values(x=5, j=7).compile,
            column_keys=['j']
        )
Example #26
def delete_null_duplicates(tablename, columns, notnull, returning=sa.text('*')):
    # note: conn is expected to exist in the enclosing (migration) scope
    assert columns
    table = sa.table(tablename, *map(sa.column, ['pk'] + columns))
    any_null = sa.or_(table.c[n] == sa.null() for n in notnull)
    yield table.delete(bind=conn).where(any_null).returning(returning)
    other = table.alias()
    yield table.delete(bind=conn).where(~any_null).returning(returning)\
        .where(sa.exists()
            .where(sa.and_(table.c[c] == other.c[c] for c in columns))
            .where(table.c.pk > other.c.pk))
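A hedged usage sketch for the generator above: the first yielded DELETE removes rows where any of the notnull columns is NULL, and the second removes duplicate rows over columns, keeping the row with the lowest pk via a self-join. conn must exist in the enclosing scope, .returning() assumes a backend that supports it (e.g. PostgreSQL), and the 'person' table and its columns are made-up names:

conn = op.get_bind()

for stmt in delete_null_duplicates('person', ['name', 'email'], notnull=['email']):
    deleted = conn.execute(stmt).fetchall()
    print('deleted %d rows' % len(deleted))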
Example #27
def increment(engine, metadata, counter):
	storm = metadata.tables['storm']
	excluded = sqlalchemy.table('excluded', sqlalchemy.column(counter))
	with engine.begin() as conn:
		do_update = DoUpdate([storm.c.date]).set(**{counter: storm.c[counter] + excluded.c[counter]})
		count, = conn.execute(storm.insert(postgresql_on_conflict=do_update).returning(storm.c[counter]), {
			'date': datetime.datetime.now(config['timezone']).date(),
			counter: 1,
		}).first()
	return count
Example #28
def upgrade():
    conn = op.get_bind()
    load_page_subtype = table('load_page_subtype',
        column('id', Integer),
        column('name', String)
    )
    op.bulk_insert(load_page_subtype,
        [
            {'id': 1,'name': 'Учебно-методическая работа'},
            {'id': 2,'name': 'Организационно-методическая работа'},
            {'id': 3,'name': 'Научная работа'},
            {'id': 4,'name': 'Воспитательная работа'},
            {'id': 5,'name': 'Другие виды работ'},
        ]
    )
    load_page_work_type = table('load_page_work_type',
        column('id', Integer),
        column('name', String),
        column('info', String),
        column('mark', Integer),
        column('subtype_id', Integer),
    )
    op.bulk_insert(load_page_work_type,
        [
            {'id': 1,'name': 'Читання лекцій, до 50 год.', 'info': 'За звітній період', 'mark': 1, 'subtype_id': 1},
            {'id': 2,'name': 'Читання лекцій, до 100 год.', 'info': 'За звітній період', 'mark': 2, 'subtype_id': 1},
            {'id': 3,'name': 'Читання лекцій, до 150 год.', 'info': 'За звітній період', 'mark': 3, 'subtype_id': 1},
            {'id': 4,'name': 'Читання лекцій, більше 150 год.', 'info': 'За звітній період', 'mark': 4, 'subtype_id': 1},
            {'id': 5,'name': 'Робота опонентом на захистах дисертацій', 'info': 'За звітній період', 'mark': 1, 'subtype_id': 1},
            {'id': 6,'name': 'Наявність почесних звань, нагород: почесні знаки МОНУ', 'info': 'За звітній період', 'mark': 5, 'subtype_id': 1},
            {'id': 7,'name': 'Наявність почесних звань, нагород: відмінник освіти України, грамота МОНУ', 'info': 'За звітній період', 'mark': 4, 'subtype_id': 1},
            {'id': 8,'name': 'Наявність почесних звань, нагород: заслужений професор ДДМА', 'info': 'За звітній період', 'mark': 6, 'subtype_id': 1},
            {'id': 9,'name': 'Наявність почесних звань, нагород: почесний професор ДДМА', 'info': 'За звітній період', 'mark': 6, 'subtype_id': 1},
            {'id': 10,'name': 'Наявність почесних звань, нагород: заслужений викладач ДДМА', 'info': 'За звітній період', 'mark': 6, 'subtype_id': 1},
            {'id': 11,'name': 'Наявність почесних звань, нагород: заслужений працівник ДДМА', 'info': 'За звітній період', 'mark': 6, 'subtype_id': 1},
            {'id': 12,'name': 'Наявність почесних звань, нагород: почесний доктор інших ВНЗ', 'info': 'За звітній період', 'mark': 6, 'subtype_id': 1},
            {'id': 13,'name': 'Наявність почесних звань, нагород: почесні звання закордонних орг-й', 'info': 'За звітній період', 'mark': 6, 'subtype_id': 1},
            {'id': 14,'name': 'Наявність почесних звань, нагород: кращий працівник ДДМА: декан, зав. кафедри, викладач тощо', 'info': 'За звітній період', 'mark': 2, 'subtype_id': 1},
            {'id': 15,'name': 'за кращу методрозробку: 1 місце', 'info': 'Кіл-ть балів поділити на кількість авторів', 'mark': 4, 'subtype_id': 1},
            {'id': 16,'name': 'за кращу методрозробку: 2 місце', 'info': 'Кіл-ть балів поділити на кількість авторів', 'mark': 3, 'subtype_id': 1},
            {'id': 17,'name': 'за кращу методрозробку: 3 місце', 'info': 'Кіл-ть балів поділити на кількість авторів', 'mark': 2, 'subtype_id': 1},
        ]
    )
Example #29
def upgrade():
    source = sa.table('source', sa.column('id'), sa.column('updated', sa.DateTime))
    dt = sa.bindparam('dt', UPDATED)
    touch = sa.update(source, bind=op.get_bind())\
        .where(source.c.id == sa.bindparam('id_'))\
        .where(source.c.updated < dt)\
        .values(updated=dt)

    for id_ in IDS:
        touch.execute(id_=id_)
Example #30
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    email = sa.table('email', sa.Column('email_id', sa.String), sa.Column('subject', sa.String))

    op.add_column('email', sa.Column('email_id', sideboard.lib.sa.CoerceUTF8(), server_default='', nullable=False))
    op.execute(
        email.update().where(email.c.email_id == '').values(
            email_id=email.c.subject
        )
    )
Example #31
class CompareAndCopyTest(fixtures.TestBase):

    # lambdas which return a tuple of ColumnElement objects.
    # must return at least two objects that should compare differently.
    # to test more varieties of "difference" additional objects can be added.
    fixtures = [
        lambda: (
            column("q"),
            column("x"),
            column("q", Integer),
            column("q", String),
        ),
        lambda: (~column("q", Boolean), ~column("p", Boolean)),
        lambda: (
            table_a.c.a.label("foo"),
            table_a.c.a.label("bar"),
            table_a.c.b.label("foo"),
        ),
        lambda: (
            _label_reference(table_a.c.a.desc()),
            _label_reference(table_a.c.a.asc()),
        ),
        lambda: (_textual_label_reference("a"), _textual_label_reference("b")),
        lambda: (
            text("select a, b from table").columns(a=Integer, b=String),
            text("select a, b, c from table").columns(
                a=Integer, b=String, c=Integer),
        ),
        lambda: (
            column("q") == column("x"),
            column("q") == column("y"),
            column("z") == column("x"),
        ),
        lambda: (
            cast(column("q"), Integer),
            cast(column("q"), Float),
            cast(column("p"), Integer),
        ),
        lambda: (
            bindparam("x"),
            bindparam("y"),
            bindparam("x", type_=Integer),
            bindparam("x", type_=String),
            bindparam(None),
        ),
        lambda: (_OffsetLimitParam("x"), _OffsetLimitParam("y")),
        lambda: (func.foo(), func.foo(5), func.bar()),
        lambda: (func.current_date(), func.current_time()),
        lambda: (
            func.next_value(Sequence("q")),
            func.next_value(Sequence("p")),
        ),
        lambda: (True_(), False_()),
        lambda: (Null(), ),
        lambda: (ReturnTypeFromArgs("foo"), ReturnTypeFromArgs(5)),
        lambda: (FunctionElement(5), FunctionElement(5, 6)),
        lambda: (func.count(), func.not_count()),
        lambda: (func.char_length("abc"), func.char_length("def")),
        lambda: (GenericFunction("a", "b"), GenericFunction("a")),
        lambda: (CollationClause("foobar"), CollationClause("batbar")),
        lambda: (
            type_coerce(column("q", Integer), String),
            type_coerce(column("q", Integer), Float),
            type_coerce(column("z", Integer), Float),
        ),
        lambda: (table_a.c.a, table_b.c.a),
        lambda: (tuple_([1, 2]), tuple_([3, 4])),
        lambda: (func.array_agg([1, 2]), func.array_agg([3, 4])),
        lambda: (
            func.percentile_cont(0.5).within_group(table_a.c.a),
            func.percentile_cont(0.5).within_group(table_a.c.b),
            func.percentile_cont(0.5).within_group(table_a.c.a, table_a.c.b),
            func.percentile_cont(0.5).within_group(table_a.c.a, table_a.c.b,
                                                   column("q")),
        ),
        lambda: (
            func.is_equal("a", "b").as_comparison(1, 2),
            func.is_equal("a", "c").as_comparison(1, 2),
            func.is_equal("a", "b").as_comparison(2, 1),
            func.is_equal("a", "b", "c").as_comparison(1, 2),
            func.foobar("a", "b").as_comparison(1, 2),
        ),
        lambda: (
            func.row_number().over(order_by=table_a.c.a),
            func.row_number().over(order_by=table_a.c.a, range_=(0, 10)),
            func.row_number().over(order_by=table_a.c.a, range_=(None, 10)),
            func.row_number().over(order_by=table_a.c.a, rows=(None, 20)),
            func.row_number().over(order_by=table_a.c.b),
            func.row_number().over(order_by=table_a.c.a,
                                   partition_by=table_a.c.b),
        ),
        lambda: (
            func.count(1).filter(table_a.c.a == 5),
            func.count(1).filter(table_a.c.a == 10),
            func.foob(1).filter(table_a.c.a == 10),
        ),
        lambda: (
            and_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            and_(table_a.c.a == 5, table_a.c.a == table_b.c.a),
            or_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_a.c.a),
        ),
        lambda: (
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 18, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.b == 10, 20)]),
            case(whens=[
                (table_a.c.a == 5, 10),
                (table_a.c.b == 10, 20),
                (table_a.c.a == 9, 12),
            ]),
            case(
                whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)],
                else_=30,
            ),
            case({
                "wendy": "W",
                "jack": "J"
            }, value=table_a.c.a, else_="E"),
            case({
                "wendy": "W",
                "jack": "J"
            }, value=table_a.c.b, else_="E"),
            case({
                "wendy_w": "W",
                "jack": "J"
            }, value=table_a.c.a, else_="E"),
        ),
        lambda: (
            extract("foo", table_a.c.a),
            extract("foo", table_a.c.b),
            extract("bar", table_a.c.a),
        ),
        lambda: (
            Slice(1, 2, 5),
            Slice(1, 5, 5),
            Slice(1, 5, 10),
            Slice(2, 10, 15),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a, table_a.c.b]),
            select([table_a.c.b, table_a.c.a]),
            select([table_a.c.a]).where(table_a.c.b == 5),
            select([table_a.c.a]).where(table_a.c.b == 5).where(table_a.c.a ==
                                                                10),
            select([table_a.c.a]).where(table_a.c.b == 5).with_for_update(),
            select([table_a.c.a]).where(table_a.c.b == 5).with_for_update(
                nowait=True),
            select([table_a.c.a]).where(table_a.c.b == 5).correlate(table_b),
            select([table_a.c.a]).where(table_a.c.b == 5).correlate_except(
                table_b),
        ),
        lambda: (
            table_a.join(table_b, table_a.c.a == table_b.c.a),
            table_a.join(table_b,
                         and_(table_a.c.a == table_b.c.a, table_a.c.b == 1)),
            table_a.outerjoin(table_b, table_a.c.a == table_b.c.a),
        ),
        lambda: (
            table_a.alias("a"),
            table_a.alias("b"),
            table_a.alias(),
            table_b.alias("a"),
            select([table_a.c.a]).alias("a"),
        ),
        lambda: (
            FromGrouping(table_a.alias("a")),
            FromGrouping(table_a.alias("b")),
        ),
        lambda: (
            select([table_a.c.a]).as_scalar(),
            select([table_a.c.a]).where(table_a.c.b == 5).as_scalar(),
        ),
        lambda: (
            exists().where(table_a.c.a == 5),
            exists().where(table_a.c.b == 5),
        ),
        lambda: (
            union(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a]), select([table_a.c.b])).order_by("a"),
            union_all(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a])),
            union(
                select([table_a.c.a]),
                select([table_a.c.b]).where(table_a.c.b > 5),
            ),
        ),
        lambda: (
            table("a", column("x"), column("y")),
            table("a", column("y"), column("x")),
            table("b", column("x"), column("y")),
            table("a", column("x"), column("y"), column("z")),
            table("a", column("x"), column("y", Integer)),
            table("a", column("q"), column("y", Integer)),
        ),
        lambda: (
            Table("a", MetaData(), Column("q", Integer), Column("b", String)),
            Table("b", MetaData(), Column("q", Integer), Column("b", String)),
        ),
    ]

    @classmethod
    def setup_class(cls):
        # TODO: we need to get dialects here somehow, perhaps in test_suite?
        [
            importlib.import_module("sqlalchemy.dialects.%s" % d)
            for d in dialects.__all__ if not d.startswith("_")
        ]

    def test_all_present(self):
        need = set(
            cls for cls in class_hierarchy(ClauseElement)
            if issubclass(cls, (ColumnElement, Selectable)) and "__init__" in
            cls.__dict__ and not issubclass(cls, (Annotated))
            and "orm" not in cls.__module__ and "crud" not in cls.__module__
            and "dialects" not in cls.__module__  # TODO: dialects?
        ).difference({ColumnElement, UnaryExpression})
        for fixture in self.fixtures:
            case_a = fixture()
            for elem in case_a:
                for mro in type(elem).__mro__:
                    need.discard(mro)

        is_false(bool(need), "%d Remaining classes: %r" % (len(need), need))

    def test_compare(self):
        for fixture in self.fixtures:
            case_a = fixture()
            case_b = fixture()

            for a, b in itertools.combinations_with_replacement(
                    range(len(case_a)), 2):
                if a == b:
                    is_true(
                        case_a[a].compare(case_b[b],
                                          arbitrary_expression=True),
                        "%r != %r" % (case_a[a], case_b[b]),
                    )

                else:
                    is_false(
                        case_a[a].compare(case_b[b],
                                          arbitrary_expression=True),
                        "%r == %r" % (case_a[a], case_b[b]),
                    )

    def test_cache_key(self):
        def assert_params_append(assert_params):
            def append(param):
                if param._value_required_for_cache:
                    assert_params.append(param)
                else:
                    is_(param.value, None)

            return append

        for fixture in self.fixtures:
            case_a = fixture()
            case_b = fixture()

            for a, b in itertools.combinations_with_replacement(
                    range(len(case_a)), 2):

                assert_a_params = []
                assert_b_params = []

                visitors.traverse_depthfirst(
                    case_a[a],
                    {},
                    {"bindparam": assert_params_append(assert_a_params)},
                )
                visitors.traverse_depthfirst(
                    case_b[b],
                    {},
                    {"bindparam": assert_params_append(assert_b_params)},
                )
                if assert_a_params:
                    assert_raises_message(
                        NotImplementedError,
                        "bindparams collection argument required ",
                        case_a[a]._cache_key,
                    )
                if assert_b_params:
                    assert_raises_message(
                        NotImplementedError,
                        "bindparams collection argument required ",
                        case_b[b]._cache_key,
                    )

                if not assert_a_params and not assert_b_params:
                    if a == b:
                        eq_(case_a[a]._cache_key(), case_b[b]._cache_key())
                    else:
                        ne_(case_a[a]._cache_key(), case_b[b]._cache_key())

    def test_cache_key_gather_bindparams(self):
        for fixture in self.fixtures:
            case_a = fixture()
            case_b = fixture()

            # in the "bindparams" case, the cache keys for bound parameters
            # with only different values will be the same, but the params
            # themselves are gathered into a collection.
            for a, b in itertools.combinations_with_replacement(
                    range(len(case_a)), 2):
                a_params = {"bindparams": []}
                b_params = {"bindparams": []}
                if a == b:
                    a_key = case_a[a]._cache_key(**a_params)
                    b_key = case_b[b]._cache_key(**b_params)
                    eq_(a_key, b_key)

                    if a_params["bindparams"]:
                        for a_param, b_param in zip(a_params["bindparams"],
                                                    b_params["bindparams"]):
                            assert a_param.compare(b_param)
                else:
                    a_key = case_a[a]._cache_key(**a_params)
                    b_key = case_b[b]._cache_key(**b_params)

                    if a_key == b_key:
                        for a_param, b_param in zip(a_params["bindparams"],
                                                    b_params["bindparams"]):
                            if not a_param.compare(b_param):
                                break
                        else:
                            assert False, "Bound parameters are all the same"
                    else:
                        ne_(a_key, b_key)

                assert_a_params = []
                assert_b_params = []
                visitors.traverse_depthfirst(
                    case_a[a], {}, {"bindparam": assert_a_params.append})
                visitors.traverse_depthfirst(
                    case_b[b], {}, {"bindparam": assert_b_params.append})

                # note we're asserting the order of the params as well as
                # if there are dupes or not.  ordering has to be deterministic
                # and matches what a traversal would provide.
                eq_(a_params["bindparams"], assert_a_params)
                eq_(b_params["bindparams"], assert_b_params)

    def test_compare_col_identity(self):
        stmt1 = (select([table_a.c.a, table_b.c.b
                         ]).where(table_a.c.a == table_b.c.b).alias())
        stmt1_c = (select([table_a.c.a, table_b.c.b
                           ]).where(table_a.c.a == table_b.c.b).alias())

        stmt2 = union(select([table_a]), select([table_b]))

        stmt3 = select([table_b])

        equivalents = {table_a.c.a: [table_b.c.a]}

        is_false(
            stmt1.compare(stmt2, use_proxies=True, equivalents=equivalents))

        is_true(
            stmt1.compare(stmt1_c, use_proxies=True, equivalents=equivalents))
        is_true((table_a.c.a == table_b.c.b).compare(
            stmt1.c.a == stmt1.c.b,
            use_proxies=True,
            equivalents=equivalents,
        ))

    def test_copy_internals(self):
        for fixture in self.fixtures:
            case_a = fixture()
            case_b = fixture()

            assert case_a[0].compare(case_b[0])

            clone = case_a[0]._clone()
            clone._copy_internals()

            assert clone.compare(case_b[0])

            stack = [clone]
            seen = {clone}
            found_elements = False
            while stack:
                obj = stack.pop(0)

                items = [
                    subelem for key, elem in clone.__dict__.items()
                    if key != "_is_clone_of" and elem is not None
                    for subelem in util.to_list(elem)
                    if (isinstance(subelem, (ColumnElement, ClauseList))
                        and subelem not in seen and not isinstance(
                            subelem, Immutable) and subelem is not case_a[0])
                ]
                stack.extend(items)
                seen.update(items)

                if obj is not clone:
                    found_elements = True
                    # ensure the element will not compare as true
                    obj.compare = lambda other, **kw: False
                    obj.__visit_name__ = "dont_match"

            if found_elements:
                assert not clone.compare(case_b[0])
            assert case_a[0].compare(case_b[0])
Example #32
def data_upgrades():
    tbl = sa.table(*TABLE[:-1])

    op.bulk_insert(tbl, [
        DEMO_SCEP_CONFIG
    ])
Example #33
result_proxy = conn.execute(
    text("SELECT id, first_name, last_name FROM users WHERE first_name = :first_name"),
    first_name="David"
)
print('All', result_proxy.fetchall())

result_proxy = conn.execute(
    text("SELECT id, first_name, last_name FROM users WHERE first_name = :first_name"),
    first_name="David"
)
print('One', result_proxy.fetchone())

# ----------------------------------------------------------------------
# SELECTing With A Schema
# ----------------------------------------------------------------------
from sqlalchemy import literal_column, table

select_query = select([
    literal_column('first_name'),
    literal_column('last_name')
]).select_from(table('users'))

# Fetch all
result_proxy = conn.execute(select_query)
print('All Literal', result_proxy.fetchall())

# Fetch one
result_proxy = conn.execute(select_query)
print('One Literal', result_proxy.fetchone())
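The heading above says "SELECTing With A Schema", but the query never actually qualifies the table. A hedged variant follows, assuming SQLAlchemy 1.3.18 or newer, where table() accepts a schema keyword; 'analytics' is a made-up schema name for illustration:

select_query = select([
    literal_column('first_name'),
    literal_column('last_name')
]).select_from(table('users', schema='analytics'))

result_proxy = conn.execute(select_query)
print('All With Schema', result_proxy.fetchall())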
Example #34
Revises: 6d95f2bca9da
Create Date: 2020-12-30 16:37:17.450543

"""
from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = "0067f9395dcd"
down_revision = "6d95f2bca9da"
branch_labels = None
depends_on = None

discussion_source_table = sa.table(
    "discussion_sources",
    sa.column("discussion_source_id", sa.SmallInteger),
    sa.column("discussion_source_name", sa.String),
)


def upgrade():
    op.bulk_insert(
        discussion_source_table,
        [
            {
                "discussion_source_id": 1,
                "discussion_source_name": "HN"
            },
            {
                "discussion_source_id": 2,
                "discussion_source_name": "REDDIT"
    def sample_using_limit(
        self,
        execution_engine: "SqlAlchemyExecutionEngine",  # noqa: F821
        batch_spec: BatchSpec,
        where_clause: Optional[Selectable] = None,
    ) -> Union[str, BinaryExpression, BooleanClauseList]:
        """Sample using a limit with configuration provided via the batch_spec.

        Note: where_clause needs to be included at this stage since SqlAlchemy's semantics
        for LIMIT are different than normal WHERE clauses.

        Also this requires an engine to find the dialect since certain databases require
        different handling.

        Args:
            execution_engine: Engine used to connect to the database.
            batch_spec: Batch specification describing the batch of interest.
            where_clause: Optional clause used in WHERE clause. Typically generated by a splitter.

        Returns:
            A query as a string or sqlalchemy object.
        """

        # Split clause should be permissive of all values if not supplied.
        if where_clause is None:
            if execution_engine.dialect_name == "sqlite":
                where_clause = sa.text("1 = 1")
            else:
                where_clause = sa.true()

        table_name: str = batch_spec["table_name"]

        # SQLalchemy's semantics for LIMIT are different than normal WHERE clauses,
        # so the business logic for building the query needs to be different.
        dialect_name: str = execution_engine.dialect_name
        if dialect_name == GESqlDialect.ORACLE.value:
            # TODO: AJB 20220429 WARNING THIS oracle dialect METHOD IS NOT COVERED BY TESTS
            # limit doesn't compile properly for oracle so we will append rownum to query string later
            raw_query: Selectable = (sa.select("*").select_from(
                sa.table(table_name,
                         schema=batch_spec.get("schema_name",
                                               None))).where(where_clause))
            query: str = str(
                raw_query.compile(
                    dialect=execution_engine.dialect,
                    compile_kwargs={"literal_binds": True},
                ))
            query += "\nAND ROWNUM <= %d" % batch_spec["sampling_kwargs"]["n"]
            return query
        elif dialect_name == GESqlDialect.MSSQL.value:
            # Note that this code path exists because the limit parameter is not getting rendered
            # successfully in the resulting mssql query.
            selectable_query: Selectable = (sa.select("*").select_from(
                sa.table(table_name,
                         schema=batch_spec.get(
                             "schema_name", None))).where(where_clause).limit(
                                 batch_spec["sampling_kwargs"]["n"]))
            string_of_query: str = str(
                selectable_query.compile(
                    dialect=execution_engine.dialect,
                    compile_kwargs={"literal_binds": True},
                ))
            n: Union[str, int] = batch_spec["sampling_kwargs"]["n"]
            self._validate_mssql_limit_param(n)
            # This string replacement is here because the limit parameter is not substituted during query.compile()
            string_of_query = string_of_query.replace("?", str(n))
            return string_of_query
        else:
            return (sa.select("*").select_from(
                sa.table(table_name,
                         schema=batch_spec.get(
                             "schema_name", None))).where(where_clause).limit(
                                 batch_spec["sampling_kwargs"]["n"]))
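A minimal standalone sketch of the generic branch above (neither Oracle nor MSSQL), assuming a made-up table name 'events' and n=10; literal_binds inlines the LIMIT value when the statement is rendered:

import sqlalchemy as sa

query = (
    sa.select("*")
    .select_from(sa.table("events"))
    .where(sa.true())
    .limit(10)
)
# Renders roughly: SELECT * FROM events WHERE true LIMIT 10
print(query.compile(compile_kwargs={"literal_binds": True}))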
Example #36
"""
from alembic import op
import sqlalchemy as sa
import manager_rest  # Adding this manually

# revision identifiers, used by Alembic.
revision = '406821843b55'
down_revision = '4dfd8797fdfa'
branch_labels = None
depends_on = None

# Define tables with just the columns needed
# to generate the UPDATE sql expressions below
users_roles = sa.table(
    'users_roles',
    sa.column('user_id', sa.Integer),
    sa.column('role_id', sa.Integer),
)
users_tenants = sa.table(
    'users_tenants',
    sa.column('user_id', sa.Integer),
    sa.column('role_id', sa.Integer),
)
roles = sa.table(
    'roles',
    sa.column('id', sa.Integer),
    sa.column('name', sa.Text),
)


def update_system_role(from_role, to_role):
Example #37
                    unique=False)
    op.create_index(op.f('blueprints_labels__labeled_model_fk_idx'),
                    'blueprints_labels', ['_labeled_model_fk'],
                    unique=False)
    op.create_index(op.f('blueprints_labels_created_at_idx'),
                    'blueprints_labels', ['created_at'],
                    unique=False)
    op.create_index(op.f('blueprints_labels_key_idx'),
                    'blueprints_labels', ['key'],
                    unique=False)
    op.create_index(op.f('blueprints_labels_value_idx'),
                    'blueprints_labels', ['value'],
                    unique=False)


dl_table = sa.table('deployments_labels', sa.Column('_labeled_model_fk'),
                    sa.Column('_deployment_fk'))


def _modify_deployments_labels_table():
    op.add_column('deployments_labels',
                  sa.Column('_labeled_model_fk', sa.Integer(), nullable=True))
    op.execute(dl_table.update().where(
        dl_table.c._labeled_model_fk.is_(None)).values(
            _labeled_model_fk=dl_table.c._deployment_fk))
    op.alter_column('deployments_labels',
                    '_labeled_model_fk',
                    existing_type=sa.Integer(),
                    nullable=False)

    op.drop_index('deployments_labels__deployment_fk_idx',
                  table_name='deployments_labels')
Example #38
                "select id, name from user",
                typemap=dict(id=Integer, name=String),
            )

        stmt = select([table1.c.myid
                       ]).select_from(table1.join(t, table1.c.myid == t.c.id))
        compiled = stmt.compile()
        eq_(
            compiled._create_result_map(),
            {
                "myid": (
                    "myid",
                    (table1.c.myid, "myid", "myid"),
                    table1.c.myid.type,
                )
            },
        )

    def test_autocommit(self):
        with testing.expect_deprecated(
                "The text.autocommit parameter is deprecated"):
            t = text("select id, name from user", autocommit=True)


table1 = table(
    "mytable",
    column("myid", Integer),
    column("name", String),
    column("description", String),
)
Example #39
def _move_dangling_data_to_new_table(session, source_table: "Table",
                                     source_query: "Query",
                                     target_table_name: str):
    from sqlalchemy import column, select, table
    from sqlalchemy.sql.selectable import Join

    bind = session.get_bind()
    dialect_name = bind.dialect.name

    # First: Create moved rows from new table
    if dialect_name == "mssql":
        cte = source_query.cte("source")
        moved_data_tbl = table(target_table_name,
                               *(column(c.name) for c in cte.columns))
        ins = moved_data_tbl.insert().from_select(list(cte.columns),
                                                  select([cte]))

        stmt = ins.compile(bind=session.get_bind())
        cte_sql = stmt.ctes[cte]

        session.execute(
            f"WITH {cte_sql} SELECT source.* INTO {target_table_name} FROM source"
        )
    elif dialect_name == "mysql":
        # MySQL with replication needs this split in to two queries, so just do it for all MySQL
        # ERROR 1786 (HY000): Statement violates GTID consistency: CREATE TABLE ... SELECT.
        session.execute(
            f"CREATE TABLE {target_table_name} LIKE {source_table.name}")
        session.execute(
            f"INSERT INTO {target_table_name} {source_query.selectable.compile(bind=session.get_bind())}"
        )
    else:
        # Postgres and SQLite both support the same "CREATE TABLE a AS SELECT ..." syntax
        session.execute(
            f"CREATE TABLE {target_table_name} AS {source_query.selectable.compile(bind=session.get_bind())}"
        )

    # Second: Now delete rows we've moved
    try:
        clause = source_query.whereclause
    except AttributeError:
        clause = source_query._whereclause

    if dialect_name == "sqlite":
        subq = source_query.selectable.with_only_columns(
            [text(f'{source_table}.ROWID')])
        delete = source_table.delete().where(column('ROWID').in_(subq))
    elif dialect_name in ("mysql", "mssql"):
        # This is not foolproof! But it works for the limited queries (with no params) that we use here
        stmt = source_query.selectable

        def _from_name(from_) -> str:
            if isinstance(from_, Join):
                return str(from_.compile(bind=bind))
            return str(from_)

        delete = (
            f"DELETE {source_table} FROM { ', '.join(_from_name(tbl) for tbl in stmt.froms) }"
            f" WHERE {clause.compile(bind=bind)}")
    else:
        for frm in source_query.selectable.froms:
            if hasattr(frm, 'onclause'):  # Table, or JOIN?
                clause &= frm.onclause
        delete = source_table.delete(clause)
    session.execute(delete)
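
A minimal usage sketch of the helper above (the table names, the run_id column, and the dangling-rows filter are illustrative assumptions, not part of the snippet):

from sqlalchemy import MetaData, Table, select

def move_dangling_rows_example(session):
    # reflect the two tables involved (hypothetical names)
    bind = session.get_bind()
    metadata = MetaData()
    task_instance = Table("task_instance", metadata, autoload=True, autoload_with=bind)
    dag_run = Table("dag_run", metadata, autoload=True, autoload_with=bind)

    # rows in task_instance without a matching dag_run are "dangling"
    dangling = session.query(task_instance).filter(
        ~task_instance.c.run_id.in_(select([dag_run.c.run_id]))
    )
    _move_dangling_data_to_new_table(
        session, task_instance, dangling, "_task_instance_dangling"
    )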
Beispiel #40
0
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    role_table = sa.table(
        'role',
        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('name', sa.String(length=80), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
    )
    op.bulk_insert(role_table, [
        {
            'id': 2,
            'name': 'product_category_view',
            'description': 'View product categories'
        },
        {
            'id': 3,
            'name': 'product_category_create',
            'description': 'Create product category'
        },
        {
            'id': 4,
            'name': 'product_category_edit',
            'description': 'Edit product category'
        },
        {
            'id': 5,
            'name': 'product_category_delete',
            'description': 'Delete product category'
        },
        {
            'id': 6,
            'name': 'sales_order_view',
            'description': 'View sales orders'
        },
        {
            'id': 7,
            'name': 'sales_order_create',
            'description': 'Create sales order'
        },
        {
            'id': 8,
            'name': 'sales_order_edit',
            'description': 'Edit sales order'
        },
        {
            'id': 9,
            'name': 'sales_order_delete',
            'description': 'Delete sales order'
        },
        {
            'id': 10,
            'name': 'purchase_order_view',
            'description': 'View purchase orders'
        },
        {
            'id': 11,
            'name': 'purchase_order_create',
            'description': 'Create purchase order'
        },
        {
            'id': 12,
            'name': 'purchase_order_edit',
            'description': 'Edit purchase order'
        },
        {
            'id': 13,
            'name': 'purchase_order_delete',
            'description': 'Delete purchase order'
        },
        {
            'id': 14,
            'name': 'expense_view',
            'description': 'View expenses'
        },
        {
            'id': 15,
            'name': 'expense_create',
            'description': 'Create expense'
        },
        {
            'id': 16,
            'name': 'expense_edit',
            'description': 'Edit expense'
        },
        {
            'id': 17,
            'name': 'expense_delete',
            'description': 'Delete expense'
        },
        {
            'id': 18,
            'name': 'incoming_view',
            'description': 'View incoming'
        },
        {
            'id': 19,
            'name': 'incoming_create',
            'description': 'Create incoming'
        },
        {
            'id': 20,
            'name': 'incoming_edit',
            'description': 'Edit incoming'
        },
        {
            'id': 21,
            'name': 'incoming_delete',
            'description': 'Delete incoming'
        },
        {
            'id': 22,
            'name': 'supplier_view',
            'description': 'View suppliers'
        },
        {
            'id': 23,
            'name': 'supplier_create',
            'description': 'Create supplier'
        },
        {
            'id': 24,
            'name': 'supplier_edit',
            'description': 'Edit supplier'
        },
        {
            'id': 25,
            'name': 'supplier_delete',
            'description': 'Delete supplier'
        },
        {
            'id': 26,
            'name': 'product_view',
            'description': 'View products'
        },
        {
            'id': 27,
            'name': 'product_create',
            'description': 'Create product'
        },
        {
            'id': 28,
            'name': 'product_edit',
            'description': 'Edit product'
        },
        {
            'id': 29,
            'name': 'product_delete',
            'description': 'Delete product'
        },
        {
            'id': 30,
            'name': 'enum_values_view',
            'description': 'View enum values'
        },
        {
            'id': 31,
            'name': 'enum_values_create',
            'description': 'Create enum value'
        },
        {
            'id': 32,
            'name': 'enum_values_edit',
            'description': 'Edit enum value'
        },
        {
            'id': 33,
            'name': 'enum_values_delete',
            'description': 'Delete enum value'
        },
        {
            'id': 34,
            'name': 'preference_view',
            'description': 'View system preference'
        },
        {
            'id': 35,
            'name': 'preference_edit',
            'description': 'Update system preference'
        },
        {
            'id': 36,
            'name': 'user_view',
            'description': 'View user'
        },
        {
            'id': 37,
            'name': 'user_create',
            'description': 'Create user'
        },
        {
            'id': 38,
            'name': 'user_edit',
            'description': 'Edit user'
        },
        {
            'id': 39,
            'name': 'user_delete',
            'description': 'Delete user'
        },
        {
            'id': 40,
            'name': 'role_view',
            'description': 'View roles'
        },
        {
            'id': 41,
            'name': 'role_create',
            'description': 'Create role'
        },
        {
            'id': 42,
            'name': 'role_edit',
            'description': 'Edit role'
        },
        {
            'id': 43,
            'name': 'role_delete',
            'description': 'Delete role'
        },
    ],
                   multiinsert=False)
    from sqlalchemy.sql import text
    op.get_bind().execute(text("ALTER SEQUENCE role_id_seq RESTART WITH 44;"))
Beispiel #41
0
    def test_legacy_setter(self):
        t = table("t", column("c"))
        s = select([t])
        s.for_update = "nowait"
        eq_(s._for_update_arg.nowait, True)
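
For reference, the non-deprecated spelling of the same behaviour is Select.with_for_update(); a minimal sketch in the same test style (the test name is ours, not from the suite):

    def test_with_for_update_equivalent(self):
        # the supported API for what the legacy setter did
        t = table("t", column("c"))
        s = select([t]).with_for_update(nowait=True)
        eq_(s._for_update_arg.nowait, True)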
Beispiel #42
0
from flask import Flask, request
from flask_restful import Resource, Api
from sqlalchemy import create_engine, table, column
from json import dumps
from flask.json import jsonify
import traceback

db = create_engine('postgres://*****:*****@host:5432/esplogger')
app = Flask(__name__)
api = Api(app)

esp_names_to_mac = table("esp_names_to_mac", column("mac"), column("id"),
                         column("name"))


class CurrentValue(Resource):
    def get(self):
        conn = db.connect()
        args = request.args
        limit = 0
        try:
            limit = int(args['limit'])
        except (KeyError, ValueError):
            # no usable "limit" query parameter; fall back to no limit
            pass
        if limit > 0:
            querystr = "select sample_time, sample_host, sample_value from esp_log ORDER BY sample_num desc LIMIT %s" % limit
        else:
            querystr = "select sample_time, sample_host, sample_value from esp_log ORDER BY sample_num desc"
        query = conn.execute(querystr)
        result = {
            'data': [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        }
        # the snippet is truncated in the source; returning the payload is
        # the natural ending for a flask_restful get()
        return jsonify(result)
Beispiel #43
0
import pandas as pd
from datetime import datetime, timedelta
from sqlalchemy import table, column

# read the Postgres connection settings
dbdata = open('transactions.txt', 'r').readlines()
IP = dbdata[0][:-1]
scheme = dbdata[1]
tabl = dbdata[3]
user = dbdata[4][:-1]
pwd = dbdata[5]

# Table structure. We'll need this later
TR = table(
    "transactions",
    column("date"),
    column("user"),
    column("event"),
    column("sum"),
    schema='course',
)

# Generate timestamps
# The idea is to generate data from one year back to one month ahead
CURRENTTIME = datetime.now()

STARTDATE = CURRENTTIME - timedelta(days=365)
ENDDATE = CURRENTTIME + timedelta(days=31)
DATES = []
i = STARTDATE
# TODO: build this with library functions (e.g. pd.date_range) instead of a while loop
while i < ENDDATE:
    i = i + timedelta(seconds=10)
    DATES.append(i)
Beispiel #44
0
def downgrade():
    property = table('property', column('name', String))

    op.execute(property.update().where(
        property.c.name == op.inline_literal('user_hosts_change')).values(
            {'name': op.inline_literal('user_mac_change')}))
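
The matching upgrade is not shown; presumably it is the mirror image, swapping the two literals. A minimal sketch:

def upgrade():
    property = table('property', column('name', String))

    op.execute(property.update().where(
        property.c.name == op.inline_literal('user_mac_change')).values(
            {'name': op.inline_literal('user_hosts_change')}))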
Beispiel #45
0
Create Date: 2016-07-08 17:54:57.399139
"""

import sqlalchemy as sa
from alembic import op
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

revision = "9e01b7287da2"
down_revision = "6f86796f64e0"

Session = sessionmaker()


user_group = sa.table(
    "user_group", sa.Column("user_id", sa.Integer), sa.Column("group_id", sa.Integer)
)


def upgrade():
    session = Session(bind=op.get_bind())

    # Find all the groups of duplicate user_group rows that have the same
    # user_id and group_id values.
    groups = (
        session.query(user_group)
        .group_by("user_id", "group_id")
        .having(sa.func.count("*") > 1)
    )

    for user_id, group_id in groups:
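        # hedged continuation: the snippet is truncated here in the source.
        # With no primary key on user_group, the simplest dedupe is to delete
        # every row for this (user_id, group_id) pair and re-insert exactly one.
        session.execute(
            user_group.delete()
            .where(user_group.c.user_id == user_id)
            .where(user_group.c.group_id == group_id)
        )
        session.execute(
            user_group.insert().values(user_id=user_id, group_id=group_id)
        )

    session.commit()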
Beispiel #46
0
class CoreFixtures(object):
    # lambdas which return a tuple of ColumnElement objects.
    # must return at least two objects that should compare differently.
    # to test more varieties of "difference" additional objects can be added.
    fixtures = [
        lambda: (
            column("q"),
            column("x"),
            column("q", Integer),
            column("q", String),
        ),
        lambda: (~column("q", Boolean), ~column("p", Boolean)),
        lambda: (
            table_a.c.a.label("foo"),
            table_a.c.a.label("bar"),
            table_a.c.b.label("foo"),
        ),
        lambda: (
            _label_reference(table_a.c.a.desc()),
            _label_reference(table_a.c.a.asc()),
        ),
        lambda: (_textual_label_reference("a"), _textual_label_reference("b")),
        lambda: (
            text("select a, b from table").columns(a=Integer, b=String),
            text("select a, b, c from table").columns(
                a=Integer, b=String, c=Integer),
            text("select a, b, c from table where foo=:bar").bindparams(
                bindparam("bar", Integer)),
            text("select a, b, c from table where foo=:foo").bindparams(
                bindparam("foo", Integer)),
            text("select a, b, c from table where foo=:bar").bindparams(
                bindparam("bar", String)),
        ),
        lambda: (
            column("q") == column("x"),
            column("q") == column("y"),
            column("z") == column("x"),
            column("z") + column("x"),
            column("z") - column("x"),
            column("x") - column("z"),
            column("z") > column("x"),
            # note these two are mathematically equivalent but for now they
            # are considered to be different
            column("z") >= column("x"),
            column("x") <= column("z"),
            column("q").between(5, 6),
            column("q").between(5, 6, symmetric=True),
            column("q").like("somstr"),
            column("q").like("somstr", escape="\\"),
            column("q").like("somstr", escape="X"),
        ),
        lambda: (
            table_a.c.a,
            table_a.c.a._annotate({"orm": True}),
            table_a.c.a._annotate({
                "orm": True
            })._annotate({"bar": False}),
            table_a.c.a._annotate({
                "orm": True,
                "parententity": MyEntity("a", table_a)
            }),
            table_a.c.a._annotate({
                "orm": True,
                "parententity": MyEntity("b", table_a)
            }),
            table_a.c.a._annotate(
                {
                    "orm": True,
                    "parententity": MyEntity("b", select([table_a]))
                }),
        ),
        lambda: (
            cast(column("q"), Integer),
            cast(column("q"), Float),
            cast(column("p"), Integer),
        ),
        lambda: (
            bindparam("x"),
            bindparam("y"),
            bindparam("x", type_=Integer),
            bindparam("x", type_=String),
            bindparam(None),
        ),
        lambda: (_OffsetLimitParam("x"), _OffsetLimitParam("y")),
        lambda: (func.foo(), func.foo(5), func.bar()),
        lambda: (func.current_date(), func.current_time()),
        lambda: (
            func.next_value(Sequence("q")),
            func.next_value(Sequence("p")),
        ),
        lambda: (True_(), False_()),
        lambda: (Null(), ),
        lambda: (ReturnTypeFromArgs("foo"), ReturnTypeFromArgs(5)),
        lambda: (FunctionElement(5), FunctionElement(5, 6)),
        lambda: (func.count(), func.not_count()),
        lambda: (func.char_length("abc"), func.char_length("def")),
        lambda: (GenericFunction("a", "b"), GenericFunction("a")),
        lambda: (CollationClause("foobar"), CollationClause("batbar")),
        lambda: (
            type_coerce(column("q", Integer), String),
            type_coerce(column("q", Integer), Float),
            type_coerce(column("z", Integer), Float),
        ),
        lambda: (table_a.c.a, table_b.c.a),
        lambda: (tuple_([1, 2]), tuple_([3, 4])),
        lambda: (func.array_agg([1, 2]), func.array_agg([3, 4])),
        lambda: (
            func.percentile_cont(0.5).within_group(table_a.c.a),
            func.percentile_cont(0.5).within_group(table_a.c.b),
            func.percentile_cont(0.5).within_group(table_a.c.a, table_a.c.b),
            func.percentile_cont(0.5).within_group(table_a.c.a, table_a.c.b,
                                                   column("q")),
        ),
        lambda: (
            func.is_equal("a", "b").as_comparison(1, 2),
            func.is_equal("a", "c").as_comparison(1, 2),
            func.is_equal("a", "b").as_comparison(2, 1),
            func.is_equal("a", "b", "c").as_comparison(1, 2),
            func.foobar("a", "b").as_comparison(1, 2),
        ),
        lambda: (
            func.row_number().over(order_by=table_a.c.a),
            func.row_number().over(order_by=table_a.c.a, range_=(0, 10)),
            func.row_number().over(order_by=table_a.c.a, range_=(None, 10)),
            func.row_number().over(order_by=table_a.c.a, rows=(None, 20)),
            func.row_number().over(order_by=table_a.c.b),
            func.row_number().over(order_by=table_a.c.a,
                                   partition_by=table_a.c.b),
        ),
        lambda: (
            func.count(1).filter(table_a.c.a == 5),
            func.count(1).filter(table_a.c.a == 10),
            func.foob(1).filter(table_a.c.a == 10),
        ),
        lambda: (
            and_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            and_(table_a.c.a == 5, table_a.c.a == table_b.c.a),
            or_(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_b.c.a),
            ClauseList(table_a.c.a == 5, table_a.c.b == table_a.c.a),
        ),
        lambda: (
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 18, 10), (table_a.c.a == 10, 20)]),
            case(whens=[(table_a.c.a == 5, 10), (table_a.c.b == 10, 20)]),
            case(whens=[
                (table_a.c.a == 5, 10),
                (table_a.c.b == 10, 20),
                (table_a.c.a == 9, 12),
            ]),
            case(
                whens=[(table_a.c.a == 5, 10), (table_a.c.a == 10, 20)],
                else_=30,
            ),
            case({
                "wendy": "W",
                "jack": "J"
            }, value=table_a.c.a, else_="E"),
            case({
                "wendy": "W",
                "jack": "J"
            }, value=table_a.c.b, else_="E"),
            case({
                "wendy_w": "W",
                "jack": "J"
            }, value=table_a.c.a, else_="E"),
        ),
        lambda: (
            extract("foo", table_a.c.a),
            extract("foo", table_a.c.b),
            extract("bar", table_a.c.a),
        ),
        lambda: (
            Slice(1, 2, 5),
            Slice(1, 5, 5),
            Slice(1, 5, 10),
            Slice(2, 10, 15),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a, table_a.c.b]),
            select([table_a.c.b, table_a.c.a]),
            select([table_a.c.a]).where(table_a.c.b == 5),
            select([table_a.c.a]).where(table_a.c.b == 5).where(table_a.c.a ==
                                                                10),
            select([table_a.c.a]).where(table_a.c.b == 5).with_for_update(),
            select([table_a.c.a]).where(table_a.c.b == 5).with_for_update(
                nowait=True),
            select([table_a.c.a]).where(table_a.c.b == 5).correlate(table_b),
            select([table_a.c.a]).where(table_a.c.b == 5).correlate_except(
                table_b),
        ),
        lambda: (
            select([table_a.c.a]).cte(),
            select([table_a.c.a]).cte(recursive=True),
            select([table_a.c.a]).cte(name="some_cte", recursive=True),
            select([table_a.c.a]).cte(name="some_cte"),
            select([table_a.c.a]).cte(name="some_cte").alias("other_cte"),
            select([table_a.c.a]).cte(name="some_cte").union_all(
                select([table_a.c.a])),
            select([table_a.c.a]).cte(name="some_cte").union_all(
                select([table_a.c.b])),
            select([table_a.c.a]).lateral(),
            select([table_a.c.a]).lateral(name="bar"),
            table_a.tablesample(func.bernoulli(1)),
            table_a.tablesample(func.bernoulli(1), seed=func.random()),
            table_a.tablesample(func.bernoulli(1), seed=func.other_random()),
            table_a.tablesample(func.hoho(1)),
            table_a.tablesample(func.bernoulli(1), name="bar"),
            table_a.tablesample(
                func.bernoulli(1), name="bar", seed=func.random()),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a]).prefix_with("foo"),
            select([table_a.c.a]).prefix_with("foo", dialect="mysql"),
            select([table_a.c.a]).prefix_with("foo", dialect="postgresql"),
            select([table_a.c.a]).prefix_with("bar"),
            select([table_a.c.a]).suffix_with("bar"),
        ),
        lambda: (
            select([table_a_2.c.a]),
            select([table_a_2_fs.c.a]),
            select([table_a_2_bs.c.a]),
        ),
        lambda: (
            select([table_a.c.a]),
            select([table_a.c.a]).with_hint(None, "some hint"),
            select([table_a.c.a]).with_hint(None, "some other hint"),
            select([table_a.c.a]).with_hint(table_a, "some hint"),
            select([table_a.c.a]).with_hint(table_a, "some hint").with_hint(
                None, "some other hint"),
            select([table_a.c.a]).with_hint(table_a, "some other hint"),
            select([table_a.c.a]).with_hint(
                table_a, "some hint", dialect_name="mysql"),
            select([table_a.c.a]).with_hint(
                table_a, "some hint", dialect_name="postgresql"),
        ),
        lambda: (
            table_a.join(table_b, table_a.c.a == table_b.c.a),
            table_a.join(table_b,
                         and_(table_a.c.a == table_b.c.a, table_a.c.b == 1)),
            table_a.outerjoin(table_b, table_a.c.a == table_b.c.a),
        ),
        lambda: (
            table_a.alias("a"),
            table_a.alias("b"),
            table_a.alias(),
            table_b.alias("a"),
            select([table_a.c.a]).alias("a"),
        ),
        lambda: (
            FromGrouping(table_a.alias("a")),
            FromGrouping(table_a.alias("b")),
        ),
        lambda: (
            SelectStatementGrouping(select([table_a])),
            SelectStatementGrouping(select([table_b])),
        ),
        lambda: (
            select([table_a.c.a]).scalar_subquery(),
            select([table_a.c.a]).where(table_a.c.b == 5).scalar_subquery(),
        ),
        lambda: (
            exists().where(table_a.c.a == 5),
            exists().where(table_a.c.b == 5),
        ),
        lambda: (
            union(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a]), select([table_a.c.b])).order_by("a"),
            union_all(select([table_a.c.a]), select([table_a.c.b])),
            union(select([table_a.c.a])),
            union(
                select([table_a.c.a]),
                select([table_a.c.b]).where(table_a.c.b > 5),
            ),
        ),
        lambda: (
            table("a", column("x"), column("y")),
            table("a", column("y"), column("x")),
            table("b", column("x"), column("y")),
            table("a", column("x"), column("y"), column("z")),
            table("a", column("x"), column("y", Integer)),
            table("a", column("q"), column("y", Integer)),
        ),
        lambda: (table_a, table_b),
    ]

    def _complex_fixtures():
        def one():
            a1 = table_a.alias()
            a2 = table_b_like_a.alias()

            stmt = (select([table_a.c.a, a1.c.b,
                            a2.c.b]).where(table_a.c.b == a1.c.b).where(
                                a1.c.b == a2.c.b).where(a1.c.a == 5))

            return stmt

        def one_diff():
            a1 = table_b_like_a.alias()
            a2 = table_a.alias()

            stmt = (select([table_a.c.a, a1.c.b,
                            a2.c.b]).where(table_a.c.b == a1.c.b).where(
                                a1.c.b == a2.c.b).where(a1.c.a == 5))

            return stmt

        def two():
            inner = one().subquery()

            stmt = select([table_b.c.a, inner.c.a, inner.c.b]).select_from(
                table_b.join(inner, table_b.c.b == inner.c.b))

            return stmt

        def three():

            a1 = table_a.alias()
            a2 = table_a.alias()
            ex = exists().where(table_b.c.b == a1.c.a)

            stmt = (select([a1.c.a, a2.c.a]).select_from(
                a1.join(a2, a1.c.b == a2.c.b)).where(ex))
            return stmt

        return [one(), one_diff(), two(), three()]

    fixtures.append(_complex_fixtures)
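
A minimal sketch of how such fixtures are typically consumed (our own illustration, not part of the suite): every pair of distinct objects produced by a fixture must compare unequal.

import itertools

def check_fixture(fixture):
    # each fixture lambda returns objects that should all compare differently
    for left, right in itertools.combinations(fixture(), 2):
        assert not left.compare(right), (left, right)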
Beispiel #47
0
import numpy as np
from sqlalchemy import table, column

# read the Postgres connection settings
dbdata = open('amounts.txt', 'r').readlines()
IP = dbdata[0][:-1]
scheme = dbdata[1]
tabl = dbdata[3]
user = dbdata[4][:-1]
pwd = dbdata[5]

# Table structure. We'll need this later
TR = table(
    "amounts",
    column("user"),
    column("special"),
    column("amount"),
    column("percent"),
    column("term"),
    column("Approved"),
    schema='course',
)

dlin = 10000
rng = np.random.default_rng(5000)
np.random.seed(42)

# ID
usrs = rng.integers(low=5000, high=11000, size=dlin)

# Special
spec = rng.integers(low=0, high=2, size=dlin)
Beispiel #48
0
def upgrade():
    op.add_column('user', sa.Column('active', sa.Boolean(), default=False))
    user = sa.table('user', sa.Column('active', sa.Boolean))
    op.execute(user.update().values({'active': True}))
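
A matching downgrade would simply drop the column again; a minimal sketch:

def downgrade():
    # the backfilled values disappear with the column, so nothing else to undo
    op.drop_column('user', 'active')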
Beispiel #49
0
from alembic import op
import sqlalchemy as sa
from sqlalchemy import orm

from h.util.user import split_user

revision = "b7117b569f8b"
down_revision = "ddb5f0baa429"

Session = orm.sessionmaker()


user = sa.table(
    "user",
    sa.column("username", sa.UnicodeText),
    sa.column("authority", sa.UnicodeText),
    sa.column("nipsa", sa.Boolean),
)
nipsa = sa.table("nipsa", sa.column("userid", sa.UnicodeText))


def upgrade():
    bind = op.get_bind()
    session = Session(bind=bind)

    op.execute(user.update().values(nipsa=False))

    # Fetch all the existing NIPSA'd userids and set the NIPSA flag on the
    # corresponding rows in the user table, if they exist.
    for (userid,) in session.query(nipsa):
        val = split_user(userid)
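        # hedged continuation: the snippet is truncated here in the source.
        # split_user() is assumed to return {"username": ..., "domain": ...};
        # set the NIPSA flag on the matching user row, if it exists.
        bind.execute(
            user.update()
            .where(user.c.username == val["username"])
            .where(user.c.authority == val["domain"])
            .values(nipsa=True)
        )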
Beispiel #50
0
class AttributesView(Base, ViewMixin):
    __tablename__ = "vw_attributes"

    __materialized__ = True
    __table_args__ = {
        "selectable":
        select([
            Column("external_object_id", primary_key=True),
            _titles,
            Column("dates", ARRAY(Integer)),
            Column("genres", ARRAY(Text)),
            Column("durations", ARRAY(Float)),
            Column("names", ARRAY(Text)),
            Column("countries", ARRAY(CHAR(length=2))),
            # Build a search vector from the first four titles
            func.setweight(func.to_tsvector(func.coalesce(_titles[0], "")),
                           "A").op("||")
            (func.setweight(func.to_tsvector(func.coalesce(_titles[1], "")),
                            "B")).op("||")
            (func.setweight(func.to_tsvector(func.coalesce(_titles[2], "")),
                            "C")).op("||")
            (func.setweight(func.to_tsvector(func.coalesce(_titles[3], "")),
                            "D")).label("search_vector"),
        ]).select_from(
            crosstab(
                select([
                    Value.external_object_id,
                    Value.type,
                    func.coalesce(
                        array_agg(
                            aggregate_order_by(Value.text,
                                               ValueScoreView.score.desc())),
                        cast(text("'{}'"), ARRAY(Text)),
                    ),
                ]).select_from(
                    join(Value, ValueScoreView,
                         Value.id == ValueScoreView.value_id).join(
                             ValueSource, Value.id == ValueSource.value_id)).
                where(
                    and_(
                        _attribute_filter,
                        tuple_(
                            Value.external_object_id,
                            Value.type,
                            ValueSource.platform_id,
                        ).in_(
                            select([
                                PlatformSourceOrderByValueType.val_eo_id,
                                PlatformSourceOrderByValueType.val_type,
                                PlatformSourceOrderByValueType.pl_id,
                            ]).select_from(PlatformSourceOrderByValueType).
                            where(
                                and_(
                                    PlatformSourceOrderByValueType.pl_order ==
                                    1,
                                    or_(
                                        PlatformSourceOrderByValueType.pl_type
                                        == PlatformType.INFO,
                                        PlatformSourceOrderByValueType.val_type
                                        == ValueType.TITLE,
                                    ),
                                ))),
                    )).group_by(Value.external_object_id, Value.type),
                table(
                    "ct",
                    column("external_object_id", Integer),
                    column("titles", ARRAY(Text)),
                    column("dates", ARRAY(Integer)),
                    column("genres", ARRAY(Text)),
                    column("durations", ARRAY(Float)),
                    column("names", ARRAY(Text)),
                    column("countries", ARRAY(CHAR(length=2))),
                ),
                categories=select(
                    [func.unnest(array([v.name for v in ValueType]))]),
                auto_order=False,
            )),
        "dependencies": (
            Value,
            ValueScoreView,
            ValueSource,
            PlatformSourceOrderByValueType,
        ),
        "materialized":
        True,
        "indexes": (Index("pk_vw_attributes",
                          "external_object_id",
                          unique=True), ),
    }
Beispiel #51
0
Revision ID: 7eeb86607ef1
Revises: 9d51ae0ab1e0
Create Date: 2020-09-27 10:24:42.298975

"""
import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision = "7eeb86607ef1"
down_revision = "9d51ae0ab1e0"
branch_labels = None
depends_on = None

donation_center = sa.table("donation_center", sa.column("slug", sa.String),
                           sa.column("title", sa.String))
medals = sa.table("medals", sa.column("slug", sa.String),
                  sa.column("title", sa.String))


def upgrade():
    op.execute(donation_center.insert().values((
        {
            donation_center.c.slug: "fm",
            donation_center.c.title: "Frýdek-Místek",
        },
        {
            donation_center.c.slug: "fm_bubenik",
            donation_center.c.title: "Frýdek-Místek, Krevní centrum",
        },
        {
"""
Make user uid nullable.

Revision ID: faefe3b614db
Revises: 1a40e75a524d
Create Date: 2017-03-02 14:17:41.708781
"""

import sqlalchemy as sa
from alembic import op

revision = "faefe3b614db"
down_revision = "1a40e75a524d"

user = sa.table("user", sa.column("username", sa.UnicodeText()),
                sa.column("uid", sa.UnicodeText()))


def upgrade():
    op.alter_column("user", "uid", nullable=True)


def downgrade():
    # Backfill the uid column for any users that were created before this was
    # rolled back.
    op.execute(
        user.update().where(user.c.uid == None)  # noqa: E711
        .values(uid=sa.func.lower(sa.func.replace(user.c.username, ".", ""))))
    op.alter_column("user", "uid", nullable=False)
Beispiel #53
0
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    from sqlalchemy.sql import text
    op.create_table(
        'customer', sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('first_name', sa.String(length=32), nullable=True),
        sa.Column('last_name', sa.String(length=32), nullable=True),
        sa.Column('mobile_phone', sa.String(length=32), nullable=True),
        sa.Column('email', sa.String(length=64), nullable=True),
        sa.Column('address', sa.String(length=64), nullable=True),
        sa.Column('birthday', sa.DateTime(), nullable=True),
        sa.Column('join_date', sa.DateTime(), nullable=False),
        sa.Column('points', sa.Integer(), nullable=False),
        sa.Column('join_channel_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ['join_channel_id'],
            ['enum_values.id'],
        ), sa.Column('level_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ['level_id'],
            ['enum_values.id'],
        ), sa.PrimaryKeyConstraint('id'))
    op.add_column(u'sales_order',
                  sa.Column('customer_id', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'sales_order', 'customer', ['customer_id'],
                          ['id'])
    enum_values_table = sa.table(
        'enum_values',
        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('type_id', sa.Integer(), nullable=True),
        sa.Column('code', sa.String(length=32), nullable=True),
        sa.Column('display', sa.String(length=64), nullable=False),
    )
    res = op.get_bind().execute('SELECT max(id)+1 FROM enum_values')
    results = res.fetchall()
    cm = 29
    for r in results:
        cm = r[0]
    op.bulk_insert(enum_values_table, [
        {
            'id': cm,
            'type_id': 1,
            'code': 'CUSTOMER_JOIN_CHANNEL',
            'display': u'会员注册渠道'
        },
        {
            'id': cm + 1,
            'type_id': 1,
            'code': 'CUSTOMER_LEVEL',
            'display': u'会员级别'
        },
        {
            'id': cm + 2,
            'type_id': cm,
            'code': 'CUSTOMER_JOIN_STORE_CHANNEL',
            'display': u'实体店加入'
        },
        {
            'id': cm + 3,
            'type_id': cm,
            'code': 'CUSTOMER_JOIN_WEB_STORE_CHANNEL',
            'display': u'网店(微信)加入'
        },
        {
            'id': cm + 4,
            'type_id': cm,
            'code': 'CUSTOMER_JOIN_INTRODUCE_CHANNEL',
            'display': u'朋友介绍'
        },
        {
            'id': cm + 6,
            'type_id': cm + 1,
            'code': 'CUSTOMER_LEVEL_BASIC',
            'display': u'基本会员'
        },
        {
            'id': cm + 7,
            'type_id': cm + 1,
            'code': 'CUSTOMER_LEVEL_SILVER',
            'display': u'银卡会员'
        },
        {
            'id': cm + 8,
            'type_id': cm + 1,
            'code': 'CUSTOMER_LEVEL_GOLDEN',
            'display': u'金卡会员'
        },
        {
            'id': cm + 9,
            'type_id': cm + 1,
            'code': 'CUSTOMER_LEVEL_PLATINUM',
            'display': u'白金卡会员'
        },
        {
            'id': cm + 10,
            'type_id': cm + 1,
            'code': 'CUSTOMER_LEVEL_LIFETIME_PLATINUM',
            'display': u'终生白金卡会员'
        },
    ],
                   multiinsert=False)
    op.get_bind().execute(
        text("ALTER SEQUENCE enum_values_id_seq RESTART WITH " + str(cm + 11) +
             ";"))
    role_table = sa.table(
        'role',
        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('name', sa.String(length=80), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
    )
    res = op.get_bind().execute('SELECT max(id)+1 FROM role')
    results = res.fetchall()
    rm = 45
    for r in results:
        rm = r[0]
    op.bulk_insert(role_table, [
        {
            'id': rm,
            'name': 'customer_view',
            'description': 'View customers'
        },
        {
            'id': rm + 1,
            'name': 'customer_create',
            'description': 'Create customers'
        },
        {
            'id': rm + 2,
            'name': 'customer_edit',
            'description': 'Edit customers'
        },
        {
            'id': rm + 3,
            'name': 'customer_delete',
            'description': 'Delete customers'
        },
    ],
                   multiinsert=False)
    op.get_bind().execute(
        text("ALTER SEQUENCE role_id_seq RESTART WITH " + str(rm + 4) + ";"))
Beispiel #54
0
def upgrade():
    # Drop comparison's old constraints and indexes ahead of the restructure
    with op.batch_alter_table('comparison',
                              naming_convention=convention) as batch_op:
        batch_op.drop_constraint(
            'fk_comparison_comparison_example_id_comparison_example',
            'foreignkey')
        batch_op.drop_index('ix_comparison_completed')
        batch_op.drop_constraint("uq_comparison_uuid", type_='unique')

    try:
        # expected the foreign keys to follow naming conventions
        with op.batch_alter_table('comparison',
                                  naming_convention=convention) as batch_op:
            # drop the fk before altering the column
            batch_op.drop_constraint('fk_comparison_assignment_id_assignment',
                                     'foreignkey')
            batch_op.drop_constraint('fk_comparison_user_id_user',
                                     'foreignkey')
            batch_op.drop_constraint('fk_comparison_criterion_id_criterion',
                                     'foreignkey')
            batch_op.drop_constraint('fk_comparison_answer1_id_answer',
                                     'foreignkey')
            batch_op.drop_constraint('fk_comparison_answer2_id_answer',
                                     'foreignkey')
            batch_op.drop_constraint('fk_comparison_modified_user_id_user',
                                     'foreignkey')
            batch_op.drop_constraint('fk_comparison_created_user_id_user',
                                     'foreignkey')
    except sa.exc.InternalError:
        # if not, the constraints likely have these auto-generated names
        with op.batch_alter_table('comparison') as batch_op:
            # drop the fk before altering the column
            batch_op.drop_constraint('comparison_ibfk_1', 'foreignkey')
            batch_op.drop_constraint('comparison_ibfk_2', 'foreignkey')
            batch_op.drop_constraint('comparison_ibfk_3', 'foreignkey')
            batch_op.drop_constraint('comparison_ibfk_4', 'foreignkey')
            batch_op.drop_constraint('comparison_ibfk_5', 'foreignkey')
            batch_op.drop_constraint('comparison_ibfk_7', 'foreignkey')
            batch_op.drop_constraint('comparison_ibfk_8', 'foreignkey')

    # rename the comparison table to comparison_temp
    op.rename_table('comparison', 'comparison_temp')

    # create new tables
    comparison_table = op.create_table(
        'comparison',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('uuid', sa.CHAR(length=22), nullable=False),
        sa.Column('assignment_id', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.Column('answer1_id', sa.Integer(), nullable=False),
        sa.Column('answer2_id', sa.Integer(), nullable=False),
        sa.Column('winner',
                  EnumType(WinningAnswer, name='winner'),
                  nullable=True),
        sa.Column('comparison_example_id', sa.Integer(), nullable=True),
        sa.Column('round_compared', sa.Integer(), nullable=False),
        sa.Column('completed', sa.Boolean(name='completed'), nullable=False),
        sa.Column('pairing_algorithm',
                  EnumType(PairingAlgorithm, name='pairing_algorithm'),
                  nullable=True),
        sa.Column('modified_user_id', sa.Integer(), nullable=True),
        sa.Column('modified', sa.DateTime(), nullable=False),
        sa.Column('created_user_id', sa.Integer(), nullable=True),
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['answer1_id'], ['answer.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['answer2_id'], ['answer.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['assignment_id'], ['assignment.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['comparison_example_id'],
                                ['comparison_example.id'],
                                ondelete='SET NULL'),
        sa.ForeignKeyConstraint(['created_user_id'], ['user.id'],
                                ondelete='SET NULL'),
        sa.ForeignKeyConstraint(['modified_user_id'], ['user.id'],
                                ondelete='SET NULL'),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('uuid'),
        mysql_charset='utf8',
        mysql_collate='utf8_unicode_ci',
        mysql_engine='InnoDB')
    op.create_index(op.f('ix_comparison_completed'),
                    'comparison', ['completed'],
                    unique=False)

    comparison_criterion_table = op.create_table(
        'comparison_criterion',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('uuid', sa.CHAR(length=22), nullable=False),
        sa.Column('comparison_id', sa.Integer(), nullable=False),
        sa.Column('criterion_id', sa.Integer(), nullable=False),
        sa.Column('winner',
                  EnumType(WinningAnswer, name='winner'),
                  nullable=True),
        sa.Column('content', sa.Text(), nullable=True),
        sa.Column('modified_user_id', sa.Integer(), nullable=True),
        sa.Column('modified', sa.DateTime(), nullable=False),
        sa.Column('created_user_id', sa.Integer(), nullable=True),
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['comparison_id'], ['comparison.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['created_user_id'], ['user.id'],
                                ondelete='SET NULL'),
        sa.ForeignKeyConstraint(['criterion_id'], ['criterion.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['modified_user_id'], ['user.id'],
                                ondelete='SET NULL'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('uuid'),
        mysql_charset='utf8',
        mysql_collate='utf8_unicode_ci',
        mysql_engine='InnoDB')

    # migrate data
    connection = op.get_bind()

    comparison_temp_table = sa.table(
        'comparison_temp',
        sa.column('id', sa.Integer),
        sa.column('uuid', sa.CHAR(22)),
        sa.Column('assignment_id', sa.Integer),
        sa.Column('user_id', sa.Integer),
        sa.Column('criterion_id', sa.Integer),
        sa.Column('answer1_id', sa.Integer),
        sa.Column('answer2_id', sa.Integer),
        sa.Column('winner_id', sa.Integer),
        sa.Column('comparison_example_id', sa.Integer),
        sa.Column('round_compared', sa.Integer),
        sa.Column('content', sa.Text),
        sa.Column('completed', sa.Boolean),
        sa.Column('pairing_algorithm', EnumType(PairingAlgorithm)),
        sa.Column('modified_user_id', sa.Integer),
        sa.Column('created_user_id', sa.Integer),
        sa.Column('modified', sa.DateTime),
        sa.Column('created', sa.DateTime),
    )

    # key is assignment_id+user_id+answer1_id+answer2_id
    # data is criteria wins per answer
    # if a record already exists in this hash, do not add a new comparison table row
    comparison_aggregate_data = {}

    chunk_size = 5000
    select_result = connection.execution_options(stream_results=True).execute(
        comparison_temp_table.select())
    chunk = select_result.fetchmany(chunk_size)
    while chunk:
        for comparison in chunk:
            key = str(comparison.assignment_id) + "+" + str(
                comparison.user_id) + "+" + str(
                    comparison.answer1_id) + "+" + str(comparison.answer2_id)
            comparison_data = comparison_aggregate_data.get(key)

            modified = comparison.modified if comparison.modified else datetime.utcnow()
            created = comparison.created if comparison.created else datetime.utcnow()
            winner = None
            if comparison.completed:
                if comparison.winner_id == comparison.answer1_id:
                    winner = WinningAnswer.answer1
                elif comparison.winner_id == comparison.answer2_id:
                    winner = WinningAnswer.answer2

            if not comparison_data:
                # populate comparison table
                result = connection.execute(comparison_table.insert().values(
                    uuid=str(base64.urlsafe_b64encode(
                        uuid.uuid4().bytes)).replace('=', ''),
                    assignment_id=comparison.assignment_id,
                    user_id=comparison.user_id,
                    answer1_id=comparison.answer1_id,
                    answer2_id=comparison.answer2_id,
                    winner=None,  # to be decided after tallying all comparisons
                    comparison_example_id=comparison.comparison_example_id,
                    round_compared=comparison.round_compared,
                    completed=comparison.completed,
                    pairing_algorithm=comparison.pairing_algorithm,
                    modified=modified,
                    created=created))
                comparison_data = {
                    'comparison_id': result.inserted_primary_key,
                    'completed': comparison.completed
                }
                if comparison.completed:
                    comparison_data['answer1'] = 0
                    comparison_data['answer2'] = 0

                comparison_aggregate_data[key] = comparison_data

            if winner == WinningAnswer.answer1:
                comparison_data['answer1'] += 1
            elif winner == WinningAnswer.answer2:
                comparison_data['answer2'] += 1

            # populate comparison_criterion table
            connection.execute(comparison_criterion_table.insert().values(
                uuid=comparison.uuid,
                comparison_id=comparison_data.get('comparison_id'),
                criterion_id=comparison.criterion_id,
                winner=winner,
                content=comparison.content,
                modified=modified,
                created=created))
        # fetch next chunk
        chunk = select_result.fetchmany(chunk_size)

    # only update completed comparisons
    for key, comparison_data in comparison_aggregate_data.items():
        if comparison_data.get('completed'):
            comparison_id = comparison_data.get('comparison_id')
            answer1 = comparison_data.get('answer1')
            answer2 = comparison_data.get('answer2')

            winner = WinningAnswer.draw
            if answer1 > answer2:
                winner = WinningAnswer.answer1
            elif answer2 > answer1:
                winner = WinningAnswer.answer2

            connection.execute(comparison_table.update().where(
                comparison_table.c.id == comparison_id).values(winner=winner))

    # drop old data table
    op.drop_table('comparison_temp')
Beispiel #55
0
def src_table(dataset: str, version: str) -> Table:
    src_table: Table = table(version)
    src_table.schema = dataset
    return src_table
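
A minimal usage sketch (the dataset and version names are hypothetical): the returned lightweight table construct renders schema-qualified SQL without reflecting anything.

from sqlalchemy import column, select

src = src_table("my_dataset", "v202101")
stmt = select([column("geom")]).select_from(src)
# str(stmt) renders roughly: SELECT geom FROM my_dataset.v202101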
Beispiel #56
0
def downgrade():
    # expected the foreign keys to follow naming conventions
    with op.batch_alter_table('comparison',
                              naming_convention=convention) as batch_op:
        # drop the fk before altering the column
        batch_op.drop_constraint('fk_comparison_assignment_id_assignment',
                                 'foreignkey')
        batch_op.drop_constraint('fk_comparison_user_id_user', 'foreignkey')
        batch_op.drop_constraint('fk_comparison_answer1_id_answer',
                                 'foreignkey')
        batch_op.drop_constraint('fk_comparison_answer2_id_answer',
                                 'foreignkey')
        batch_op.drop_constraint('fk_comparison_modified_user_id_user',
                                 'foreignkey')
        batch_op.drop_constraint('fk_comparison_created_user_id_user',
                                 'foreignkey')
        batch_op.drop_constraint(
            'fk_comparison_comparison_example_id_comparison_example',
            'foreignkey')
        batch_op.drop_index('ix_comparison_completed')
        batch_op.drop_constraint("uq_comparison_uuid", type_='unique')

    # rename the comparison table to comparison_temp
    op.rename_table('comparison', 'comparison_temp')

    # create old comparison table
    comparison_table = op.create_table(
        'comparison',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('uuid', sa.CHAR(22), nullable=False),
        sa.Column('assignment_id', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.Column('criterion_id', sa.Integer(), nullable=False),
        sa.Column('answer1_id', sa.Integer(), nullable=False),
        sa.Column('answer2_id', sa.Integer(), nullable=False),
        sa.Column('winner_id', sa.Integer(), nullable=True),
        sa.Column('comparison_example_id', sa.Integer(), nullable=True),
        sa.Column('round_compared', sa.Integer(), nullable=False),
        sa.Column('content', sa.Text(), nullable=True),
        sa.Column('completed', sa.Boolean(name='completed'), nullable=False),
        sa.Column('pairing_algorithm',
                  EnumType(PairingAlgorithm, name='pairing_algorithm'),
                  nullable=True),
        sa.Column('modified_user_id', sa.Integer(), nullable=True),
        sa.Column('modified', sa.DateTime(), nullable=False),
        sa.Column('created_user_id', sa.Integer(), nullable=True),
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['answer1_id'], ['answer.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['answer2_id'], ['answer.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['assignment_id'], ['assignment.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['comparison_example_id'],
                                ['comparison_example.id'],
                                ondelete='SET NULL'),
        sa.ForeignKeyConstraint(['created_user_id'], ['user.id'],
                                ondelete='SET NULL'),
        sa.ForeignKeyConstraint(['criterion_id'], ['criterion.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['modified_user_id'], ['user.id'],
                                ondelete='SET NULL'),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['winner_id'], ['answer.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('uuid'),
        mysql_collate='utf8_unicode_ci',
        mysql_default_charset='utf8',
        mysql_engine='InnoDB')
    op.create_index(op.f('ix_comparison_completed'),
                    'comparison', ['completed'],
                    unique=False)

    # migrate data
    connection = op.get_bind()

    comparison_temp_table = sa.table(
        'comparison_temp',
        sa.column('id', sa.Integer),
        sa.Column('assignment_id', sa.Integer),
        sa.Column('user_id', sa.Integer),
        sa.Column('answer1_id', sa.Integer),
        sa.Column('answer2_id', sa.Integer),
        sa.Column('comparison_example_id', sa.Integer),
        sa.Column('round_compared', sa.Integer),
        sa.Column('completed', sa.Boolean),
        sa.Column('pairing_algorithm', EnumType(PairingAlgorithm)),
        sa.Column('modified_user_id', sa.Integer),
        sa.Column('created_user_id', sa.Integer),
        sa.Column('modified', sa.DateTime),
        sa.Column('created', sa.DateTime),
    )

    comparison_criterion_table = sa.table(
        'comparison_criterion',
        sa.column('id', sa.Integer),
        sa.column('uuid', sa.CHAR(22)),
        sa.Column('comparison_id', sa.Integer),
        sa.Column('criterion_id', sa.Integer),
        sa.Column('winner', EnumType(WinningAnswer)),
        sa.Column('content', sa.Text),
        sa.Column('modified_user_id', sa.Integer),
        sa.Column('created_user_id', sa.Integer),
        sa.Column('modified', sa.DateTime),
        sa.Column('created', sa.DateTime),
    )

    chunk_size = 5000
    select_result = connection.execution_options(stream_results=True).execute(
        comparison_criterion_table.select())
    chunk = select_result.fetchmany(chunk_size)
    while chunk:
        for comparison_criterion in chunk:
            comparison = None
            comparisons = connection.execute(
                comparison_temp_table.select().where(
                    comparison_temp_table.c.id ==
                    comparison_criterion.comparison_id))
            for c in comparisons:
                comparison = c

            if comparison is None:
                continue

            modified = comparison_criterion.modified if comparison_criterion.modified else datetime.utcnow()
            created = comparison_criterion.created if comparison_criterion.created else datetime.utcnow()

            winner_id = None
            if comparison_criterion.winner == WinningAnswer.answer1:
                winner_id = comparison.answer1_id
            elif comparison_criterion.winner == WinningAnswer.answer2:
                winner_id = comparison.answer2_id

            # populate comparison table
            connection.execute(comparison_table.insert().values(
                uuid=comparison_criterion.uuid,
                assignment_id=comparison.assignment_id,
                user_id=comparison.user_id,
                criterion_id=comparison_criterion.criterion_id,
                answer1_id=comparison.answer1_id,
                answer2_id=comparison.answer2_id,
                winner_id=winner_id,
                comparison_example_id=comparison.comparison_example_id,
                round_compared=comparison.round_compared,
                content=comparison_criterion.content,
                completed=comparison.completed,
                pairing_algorithm=comparison.pairing_algorithm,
                modified=modified,
                created=created))
        # fetch next chunk
        chunk = select_result.fetchmany(chunk_size)

    # drop new tables
    op.drop_table('comparison_criterion')
    op.drop_table('comparison_temp')
Beispiel #57
0
class SubqueryCoercionsTest(fixtures.TestBase, AssertsCompiledSQL):
    __dialect__ = "default"

    table1 = table(
        "mytable",
        column("myid", Integer),
        column("name", String),
        column("description", String),
    )

    table2 = table("myothertable", column("otherid", Integer),
                   column("othername", String))

    def test_select_of_select(self):
        stmt = select([self.table1.c.myid])

        with testing.expect_deprecated(
                r"The SelectBase.select\(\) method is deprecated and will be "
                "removed"):
            self.assert_compile(
                stmt.select(),
                "SELECT anon_1.myid FROM (SELECT mytable.myid AS myid "
                "FROM mytable) AS anon_1",
            )

    def test_join_of_select(self):
        stmt = select([self.table1.c.myid])

        with testing.expect_deprecated(
                r"The SelectBase.join\(\) method is deprecated and will be "
                "removed"):
            self.assert_compile(
                stmt.join(self.table2,
                          self.table2.c.otherid == self.table1.c.myid),
                # note the SQL is wrong here as the subquery now has a name.
                # however, even SQLite which accepts unnamed subqueries in a
                # JOIN cannot actually join with how SQLAlchemy 1.3 and
                # earlier would render:
                # sqlite> select myid, otherid from (select myid from mytable)
                # join myothertable on mytable.myid=myothertable.otherid;
                # Error: no such column: mytable.myid
                # if using stmt.c.col, that fails often as well if there are
                # any naming overlaps:
                # sqlalchemy.exc.OperationalError: (sqlite3.OperationalError)
                # ambiguous column name: id
                # [SQL: SELECT id, data
                # FROM (SELECT a.id AS id, a.data AS data
                # FROM a) JOIN b ON b.a_id = id]
                # so that shows that nobody is using this anyway
                "(SELECT mytable.myid AS myid FROM mytable) AS anon_1 "
                "JOIN myothertable ON myothertable.otherid = mytable.myid",
            )

    def test_outerjoin_of_select(self):
        stmt = select([self.table1.c.myid])

        with testing.expect_deprecated(
                r"The SelectBase.outerjoin\(\) method is deprecated and will be "
                "removed"):
            self.assert_compile(
                stmt.outerjoin(self.table2,
                               self.table2.c.otherid == self.table1.c.myid),
                # note the SQL is wrong here as the subquery now has a name
                "(SELECT mytable.myid AS myid FROM mytable) AS anon_1 "
                "LEFT OUTER JOIN myothertable "
                "ON myothertable.otherid = mytable.myid",
            )

    def test_column_roles(self):
        stmt = select([self.table1.c.myid])

        for role in [
                roles.WhereHavingRole,
                roles.ExpressionElementRole,
                roles.ByOfRole,
                roles.OrderByRole,
                # roles.LabeledColumnExprRole
        ]:
            with testing.expect_deprecated(
                    "coercing SELECT object to scalar "
                    "subquery in a column-expression context is deprecated"):
                coerced = coercions.expect(role, stmt)
                is_true(coerced.compare(stmt.scalar_subquery()))

            with testing.expect_deprecated(
                    "coercing SELECT object to scalar "
                    "subquery in a column-expression context is deprecated"):
                coerced = coercions.expect(role, stmt.alias())
                is_true(coerced.compare(stmt.scalar_subquery()))

    def test_labeled_role(self):
        stmt = select([self.table1.c.myid])

        with testing.expect_deprecated(
                "coercing SELECT object to scalar "
                "subquery in a column-expression context is deprecated"):
            coerced = coercions.expect(roles.LabeledColumnExprRole, stmt)
            is_true(coerced.compare(stmt.scalar_subquery().label(None)))

        with testing.expect_deprecated(
                "coercing SELECT object to scalar "
                "subquery in a column-expression context is deprecated"):
            coerced = coercions.expect(roles.LabeledColumnExprRole,
                                       stmt.alias())
            is_true(coerced.compare(stmt.scalar_subquery().label(None)))

    def test_scalar_select(self):

        with testing.expect_deprecated(
                "coercing SELECT object to scalar "
                "subquery in a column-expression context is deprecated"):
            self.assert_compile(
                func.coalesce(select([self.table1.c.myid])),
                "coalesce((SELECT mytable.myid FROM mytable))",
            )

        with testing.expect_deprecated(
                "coercing SELECT object to scalar "
                "subquery in a column-expression context is deprecated"):
            s = select([self.table1.c.myid]).alias()
            self.assert_compile(
                select([self.table1.c.myid]).where(self.table1.c.myid == s),
                "SELECT mytable.myid FROM mytable WHERE "
                "mytable.myid = (SELECT mytable.myid FROM "
                "mytable)",
            )

        with testing.expect_deprecated(
                "coercing SELECT object to scalar "
                "subquery in a column-expression context is deprecated"):
            self.assert_compile(
                select([self.table1.c.myid]).where(s > self.table1.c.myid),
                "SELECT mytable.myid FROM mytable WHERE "
                "mytable.myid < (SELECT mytable.myid FROM "
                "mytable)",
            )
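
        # Non-deprecated spelling (a sketch, not in the original test): build
        # the scalar subquery explicitly instead of relying on coercion.
        s = select([self.table1.c.myid]).scalar_subquery()
        self.assert_compile(
            select([self.table1.c.myid]).where(self.table1.c.myid == s),
            "SELECT mytable.myid FROM mytable WHERE "
            "mytable.myid = (SELECT mytable.myid FROM "
            "mytable)",
        )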

    def test_standalone_alias(self):
        with testing.expect_deprecated(
                "Implicit coercion of SELECT and textual SELECT constructs"):
            stmt = alias(select([self.table1.c.myid]), "foo")

        self.assert_compile(stmt, "SELECT mytable.myid FROM mytable")

        is_true(
            stmt.compare(select([self.table1.c.myid]).subquery().alias("foo")))

    def test_as_scalar(self):
        with testing.expect_deprecated(
                r"The SelectBase.as_scalar\(\) method is deprecated and "
                "will be removed in a future release."):
            stmt = select([self.table1.c.myid]).as_scalar()

        is_true(stmt.compare(select([self.table1.c.myid]).scalar_subquery()))

    def test_as_scalar_from_subquery(self):
        with testing.expect_deprecated(
                r"The Subquery.as_scalar\(\) method, which was previously "
                r"``Alias.as_scalar\(\)`` prior to version 1.4"):
            stmt = select([self.table1.c.myid]).subquery().as_scalar()

        is_true(stmt.compare(select([self.table1.c.myid]).scalar_subquery()))

    def test_fromclause_subquery(self):
        stmt = select([self.table1.c.myid])
        with testing.expect_deprecated(
                "Implicit coercion of SELECT and textual SELECT constructs "
                "into FROM clauses is deprecated"):
            coerced = coercions.expect(roles.StrictFromClauseRole,
                                       stmt,
                                       allow_select=True)

        is_true(coerced.compare(stmt.subquery()))

    def test_plain_fromclause_select_to_subquery(self):
        with testing.expect_deprecated(
                "Implicit coercion of SELECT and textual SELECT "
                "constructs into FROM clauses is deprecated;"):
            element = coercions.expect(
                roles.FromClauseRole,
                SelectStatementGrouping(select([self.table1])),
            )
            is_true(
                element.compare(
                    SelectStatementGrouping(select([self.table1])).subquery()))

    def test_functions_select_method_two(self):
        expr = func.rows("foo")
        with testing.expect_deprecated(
                "Implicit coercion of SELECT and textual SELECT constructs "
                "into FROM clauses is deprecated"):
            stmt = select(["*"]).select_from(expr.select())
        self.assert_compile(
            stmt, "SELECT * FROM (SELECT rows(:rows_2) AS rows_1) AS anon_1")

    def test_functions_with_cols(self):
        users = table("users", column("id"), column("name"),
                      column("fullname"))
        calculate = select(
            [column("q"), column("z"), column("r")],
            from_obj=[
                func.calculate(bindparam("x", None), bindparam("y", None))
            ],
        )

        with testing.expect_deprecated(
                "The SelectBase.c and SelectBase.columns attributes are "
                "deprecated and will be removed"):
            self.assert_compile(
                select([users], users.c.id > calculate.c.z),
                "SELECT users.id, users.name, users.fullname "
                "FROM users, (SELECT q, z, r "
                "FROM calculate(:x, :y)) AS anon_1 "
                "WHERE users.id > anon_1.z",
            )
Beispiel #58
0
# Imports assumed by this example; names not imported below (Base, Platform,
# ExternalObject, ExternalObjectType, ImportFileStatus, ImportFileLog,
# ObjectLink, Value, ValueSource, ValueType, LinksOverlap,
# ObjectTypeMismatchError, attr_type, import_path, logger, the
# @before/@after state-machine hooks and @inject_session) come from the
# surrounding application and are not shown here.
import codecs
import csv
from contextlib import contextmanager
from datetime import datetime
from io import TextIOWrapper
from typing import Dict, List, Tuple, Union

from celery import chord
from chardet.universaldetector import UniversalDetector
from sqlalchemy import (
    Column, Date, Enum, ForeignKey, Integer, Sequence, String,
    column, orm, select, table, tuple_,
)
from sqlalchemy.dialects.postgresql import HSTORE
from sqlalchemy.orm import column_property, relationship


class ImportFile(Base):
    __tablename__ = "import_file"

    import_file_id_seq = Sequence("import_file_id_seq", metadata=Base.metadata)
    id = Column(
        Integer,
        import_file_id_seq,
        server_default=import_file_id_seq.next_value(),
        primary_key=True,
    )

    status = Column(Enum(ImportFileStatus), default=None, nullable=False)
    filename = Column(String, nullable=False)
    fields = Column(HSTORE, nullable=False)
    # Pass the callable, not its result, so the date is evaluated per row
    # instead of once at class-definition time.
    effective_date = Column(Date, default=datetime.now, nullable=False)

    platform_id = Column(
        Integer, ForeignKey(Platform.id, onupdate="CASCADE"), nullable=True
    )
    """The platform the new object and attributes will be assigned to"""

    imported_external_object_type = Column(Enum(ExternalObjectType), nullable=True)
    """The type of the newly imported objects"""

    provider_id = Column(
        Integer,
        ForeignKey("provider.id", ondelete="SET NULL", onupdate="SET NULL"),
        nullable=True,
    )

    platform = relationship(Platform, back_populates="imports")
    logs = relationship("ImportFileLog", back_populates="file")

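    # Correlated scalar subquery: the most recent import_file_log timestamp
    # for this row, deferred so it is only loaded when accessed.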
    last_activity = column_property(
        select([column("timestamp")])
        .select_from(table("import_file_log"))
        .where(column("import_file_id") == id)
        .order_by(column("timestamp").desc())
        .limit(1)
        .scalar_subquery(),
        deferred=True,
    )

    links = relationship(
        "ObjectLink", secondary="import_link", back_populates="imports"
    )

    sessions = relationship(
        "Session", secondary="session_import_file", back_populates="imports"
    )

    provider = relationship("Provider", back_populates="imports")

    def __init__(self, **kwargs):
        super(ImportFile, self).__init__(**kwargs)
        self._codec = None
        self._line_count = None

    @orm.reconstructor
    def init_on_load(self):
        self._codec = None
        self._line_count = None

    @before("upload")
    def upload_file(self, file):
        self.filename = file.filename
        self.fields = {}

    @property
    def path(self):
        return import_path() / (str(self.id) + ".csv")

    def open(self):
        file = self.path.open(mode="rb")

        if not self._codec:
            detector = UniversalDetector()
            for line in file.readlines():
                detector.feed(line)
                if detector.done:
                    break
            detector.close()
            file.seek(0)
            # chardet may return None when detection fails; fall back to UTF-8.
            codec = detector.result["encoding"] or "utf-8"

            try:
                # Prefer the lenient "sloppy-" codec variant (registered e.g.
                # by ftfy.bad_codecs) when available; otherwise use the plain
                # codec.
                self._codec = codecs.lookup("sloppy-" + codec)
            except LookupError:
                self._codec = codecs.lookup(codec)

        return TextIOWrapper(file, encoding=self._codec.name)

    def get_line_count(self):
        if self._line_count is None:
            try:
                with self.open() as f:
                    self._line_count = sum(1 for line in f)
            except Exception:
                self._line_count = 0

        return self._line_count

    def get_codec(self):
        if not self._codec:
            self.open().close()

        return self._codec.name

    def detect_dialect(self, f):
        extract = "".join(f.readlines()[:100])
        return csv.Sniffer().sniff(extract)

    @contextmanager
    def csv_reader(self):
        with self.open() as f:
            dialect = self.detect_dialect(f)
            f.seek(0)
            yield csv.reader(f, dialect=dialect)

    def header(self):
        try:
            with self.csv_reader() as r:
                return next(r)
        except (IOError, UnicodeError):
            # FIXME: handle those errors
            return []

    def map_fields(
        self, header: list
    ) -> Dict[str, Union[List[int], Dict[str, List[int]]]]:
        """Map self.fields to header indexes"""
        output = {
            "external_object_id": [],
            "attribute": {},
            "attribute_list": {},
            "link": {},
        }  # type: Dict[str, Union[List[int], Dict[str, List[int]]]]

        for key, value in self.fields.items():
            if not value:
                continue

            indexes = [idx for idx, col in enumerate(header) if col == key]
            type_, _, arg = value.partition(".")
            assert type_, key + " is empty"
            assert indexes, "no matching columns found for " + key

            out = output[type_]

            if isinstance(out, list):
                assert not arg, type_ + " should have no argument"
                out.extend(indexes)
            elif isinstance(out, dict):
                assert arg, type_ + " needs an argument"
                out[arg] = out.get(arg, []) + indexes

        return output
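
    # Illustration (hypothetical field names, not from the source): given
    #   self.fields == {"id": "external_object_id",
    #                   "title": "attribute.title",
    #                   "genres": "attribute_list.genre"}
    # and header == ["id", "title", "genres"], map_fields returns
    #   {"external_object_id": [0],
    #    "attribute": {"title": [1]},
    #    "attribute_list": {"genre": [2]},
    #    "link": {}}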

    def map_line(
        self, fields, line: List[str]
    ) -> Tuple[
        List[int], List[Tuple[ValueType, List[str]]], List[Tuple[str, List[str]]]
    ]:
        external_object_ids = [
            int(line[i]) for i in fields["external_object_id"] if line[i]
        ]

        attributes = []  # type: List[Tuple[ValueType, List[str]]]
        for attribute in ValueType:
            attr_list = []  # type: List[str]
            attr_list += [
                line[i].strip()
                for i in fields["attribute"].get(str(attribute), [])
                if line[i]
            ]
            attr_list += [
                li.strip()
                for i in fields["attribute_list"].get(str(attribute), [])
                for li in line[i].split(",")
                if li
            ]
            if attr_list:
                attributes.append((attribute, attr_list))

        # The keys collected here are platform slugs (str); they are
        # presumably resolved to Platform objects before process_row runs.
        links = []  # type: List[Tuple[str, List[str]]]
        for key, value in fields["link"].items():
            key = key.replace("_", "-")  # FIXME: not sure if this is a good idea
            link_list = [line[i] for i in value if line[i]]
            if link_list:
                links.append((key, link_list))

        return external_object_ids, attributes, links
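
    # Illustration continuing the example above (assuming ValueType has
    # "title" and "genre" members): for line == ["12", "Some Title",
    # "drama,comedy"], map_line returns
    #   ([12],
    #    [(ValueType.title, ["Some Title"]),
    #     (ValueType.genre, ["drama", "comedy"])],
    #    [])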

    @after("process")
    @inject_session
    def process_import(self, session=None):
        from matcher.tasks.import_ import process_row, mark_done

        tasks = []

        with self.csv_reader() as reader:
            # Fetch the header and map to fields
            header = next(reader)
            fields = self.map_fields(header)

            # Start reading the file
            for ln, line in enumerate(reader):
                ids, attributes, links = self.map_line(fields, line)

                # TODO: this is quite ugly, and only needed because celery
                # tasks only accept JSON-serializable arguments.
                attributes = [(str(k), v) for (k, v) in attributes]
                tasks.append(process_row.si(self.id, ids, attributes, links))

        # FIXME(sandhose): is this really needed?
        session.add(self)
        session.commit()

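        # chord() runs every process_row task, then fires mark_done once all
        # of them have completed; countdown=10 delays the start by 10 seconds.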
        chord(tasks, mark_done.si(self.id)).apply_async(countdown=10)

    @inject_session
    def reduce_or_create_ids(self, external_object_ids: List[int], session=None):
        """Reduce a list of ExternalObject ids into a single (merged)
        ExternalObject; create a new one when the list is empty."""
        if external_object_ids:
            # If there are external_object_ids, fetch the first and merge the rest
            ids = iter(set(external_object_ids))
            obj = session.query(ExternalObject).get(next(ids))

            # Merge the additional external_object_ids
            for id_ in ids:
                to_merge = session.query(ExternalObject).get(id_)
                if to_merge:
                    if obj is None:
                        obj = to_merge
                    else:
                        try:
                            to_merge.merge_and_delete(obj, session=session)
                        except (LinksOverlap, ObjectTypeMismatchError):
                            logger.warning("Error while merging", exc_info=True)
        else:
            # else create a new object
            assert self.imported_external_object_type
            obj = ExternalObject(type=self.imported_external_object_type)
            session.add(obj)

        session.commit()
        return obj

    @inject_session
    def find_additional_links(
        self, links: List[Tuple[Platform, List[str]]], session=None
    ):
        """Find additional ExternalObjects from the links"""
        tuples = [
            (p.id, external_id)
            for (p, external_ids) in links
            for external_id in external_ids
        ]
        ids = (
            session.query(ObjectLink.external_object_id)
            .filter(tuple_(ObjectLink.platform_id, ObjectLink.external_id).in_(tuples))
            .all()
        )
        return [id_ for (id_,) in ids]

    @inject_session
    def process_row(
        self,
        external_object_ids: List[int],
        attributes: List[Tuple[ValueType, List[str]]],
        links: List[Tuple[Platform, List[str]]],
        session=None,
    ) -> None:
        # This does a lot of things.
        # We have three type of data possible in a row:
        #  - `ExternalObject.id`s, that should be merged in one object
        #  - A list of links for this row
        #  - A list of attributes to add
        #
        # First thing we do is to list all the objects that should be merged.
        # This includes the provided external_object_ids and the one we can
        # find using the provided links.
        #
        # If no ExternalObject was found, we create a new one, using the
        # `imported_external_object_type` provided in the ImportFile
        #
        # After everything is merged, we need to check what ObjectLink needs to
        # be replaced. Those are first deleted, and all the values that were
        # added by the associated platforms are deleted as well.
        #
        # The last step is to add the attributes that were provided. For this
        # we need to have a platform set in the ImportFile, because the value
        # need to have a source in order to have a score big enough.
        #
        # One caveat: it **is possible** to insert new objects without link to
        # the platform, which can lead to duplicates and orphan objects.
        #
        # The whole thing is not transactional and might f*ck everything up.
        # This needs to be tested a lot before using it for real.

        if attributes:
            # If we want to insert attributes we need a platform to which to assign them
            assert self.platform

        if len(external_object_ids) > 0:
            # We are about to add new links: remove the old ones and clear
            # the attributes they set
            for (platform, external_ids) in links:
                if len(external_ids) == 0:
                    continue

                logger.info("Deleting links (%d, %r)", platform.id, external_ids)
                existing_links = (
                    session.query(ObjectLink)
                    .filter(
                        ObjectLink.platform == platform,
                        ObjectLink.external_object_id.in_(external_object_ids),
                        ~ObjectLink.external_id.in_(external_ids),
                    )
                    .delete(synchronize_session=False)
                )

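                # Query.delete() returns the number of rows it removed.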
                if existing_links:
                    logger.info("A link was removed, deleting values")
                    # Delete the values that were associated with this platform
                    session.query(ValueSource).filter(
                        ValueSource.platform == platform
                    ).filter(
                        ValueSource.value_id.in_(
                            session.query(Value.id).filter(
                                Value.external_object_id.in_(external_object_ids)
                            )
                        )
                    ).delete(
                        synchronize_session=False
                    )

            session.commit()

        # Fetch other existing external_object_ids from the links
        external_object_ids += self.find_additional_links(links=links, session=session)

        if len(external_object_ids) <= 1 and not attributes and not links:
            return

        # Get one merged ExternalObject
        obj = self.reduce_or_create_ids(external_object_ids, session=session)

        if obj is None:
            logger.error("External object not found %r", external_object_ids)
            return

        # Add the new links
        for (platform, external_ids) in links:
            for external_id in external_ids:
                link = (
                    session.query(ObjectLink)
                    .filter(
                        ObjectLink.external_id == external_id,
                        ObjectLink.platform == platform,
                        ObjectLink.external_object == obj,
                    )
                    .first()
                )

                if not link:
                    link = ObjectLink(
                        external_object=obj, platform=platform, external_id=external_id
                    )

                if platform == self.platform:
                    self.links.append(link)

        session.commit()

        attributes_list = set()

        # Map the attributes to dicts accepted by add_attribute
        for (type_, values) in attributes:
            # The values are weighed by their position in the array
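            # e.g. values == ["a", "b", "c"] iterates with scale=1 for "c",
            # scale=2 for "b", scale=3 for "a": earlier columns weigh more.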
            for scale, value in enumerate(reversed(values), 1):
                attributes_list.add(attr_type(type_, value, 1 * scale))

                # Format the attribute (e.g. map to ISO code or extract the year in a date)
                fmt = type_.fmt(value)
                if fmt and fmt != value:
                    attributes_list.add(attr_type(type_, fmt, 1.2 * scale))

        for attribute in attributes_list:
            obj.add_attribute(dict(attribute._asdict()), self.platform)

        # Cleanup attributes with no sources
        session.query(Value).filter(Value.external_object_id == obj.id).filter(
            ~Value.sources.any()
        ).delete(synchronize_session=False)
        session.commit()

        logger.info("Imported %d", obj.id)

    @after
    def log_status(self, message=None, *_, **__):
        self.logs.append(ImportFileLog(status=self.status, message=message))
"""

Revision ID: eb9f0fb6d3ae
Revises: 20a01e824eee
Create Date: 2020-12-28 12:51:04.008572

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = "eb9f0fb6d3ae"
down_revision = "20a01e824eee"
branch_labels = None
depends_on = None

access_verbs_table = sa.table(
    "access_verbs",
    sa.column("access_verb_id", sa.SmallInteger),
    sa.column("access_verb_name", sa.String),
)
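
# The ids appear to be bit flags: READ=1, WRITE=2 (READWRITE=3 combines
# them), READACCESS=4, WRITEACCESS=8; ALL=15 sets all four bits.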


def upgrade():
    op.bulk_insert(
        access_verbs_table,
        [
            {"access_verb_id": 0, "access_verb_name": "NONE"},
            {"access_verb_id": 1, "access_verb_name": "READ"},
            {"access_verb_id": 2, "access_verb_name": "WRITE"},
            {"access_verb_id": 3, "access_verb_name": "READWRITE"},
            {"access_verb_id": 4, "access_verb_name": "READACCESS"},
            {"access_verb_id": 8, "access_verb_name": "WRITEACCESS"},
            {"access_verb_id": 15, "access_verb_name": "ALL"},
        ],
    )
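
# A plausible downgrade (a sketch, not from the source migration): remove the
# seeded rows so this data migration stays reversible.
def downgrade():
    op.execute(
        access_verbs_table.delete().where(
            access_verbs_table.c.access_verb_id.in_([0, 1, 2, 3, 4, 8, 15])
        )
    )
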
from sqlalchemy import table, column

song = table(
    'songs',
    column('id'),
    column('name'),
    column('artist'),
    column('genre'),
    column('date_created')
)
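
# A minimal usage sketch (values are illustrative placeholders, not from the
# source): seeding a row through the lightweight table() construct above, as
# one would in an Alembic data migration.
from alembic import op


def upgrade():
    op.bulk_insert(
        song,
        [{"id": 1, "name": "Example Song", "artist": "Unknown",
          "genre": "pop", "date_created": "2020-01-01"}],
    )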