Example #1
        def export_terms(dest_star, job):
            v = self.variable_table(dest_star)
            keys = job.concepts['keys']
            names = job.concepts['names']
            paths = I2B2MetaData.keys_to_paths(keys)
            v.create(bind=dest_db)
            dest_db.execute(v.insert(),
                            [dict(id=id,
                                  item_key=key,
                                  concept_path=path,
                                  name_char=name,
                                  name=strip_counts(name))
                             for (id, (path, key, name)) in
                             enumerate(zip(paths, keys, names))])

            [(q_cd, result_cd), (q_md, result_md)] = job.term_info()
            cd = dest_star.tables['concept_dimension']
            md = dest_star.tables['modifier_dimension']

            values = lambda _: dict(concept_path=bindparam('concept_path'),
                                    concept_cd=bindparam('concept_cd'))
            tc.copy_in_chunks(dest_db, result_cd, cd,
                              'concept_dimension', [],
                              values=values)
            values = lambda _: dict(concept_path=bindparam('modifier_path'),
                                    concept_cd=bindparam('modifier_cd'))
            tc.copy_in_chunks(dest_db, result_md, md,
                              'modifier_dimension', [],
                              values=values)
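The insert above relies on SQLAlchemy's executemany behaviour: passing a list of dictionaries as the second argument to execute() runs the compiled INSERT once per dictionary. A minimal, self-contained sketch of that pattern (the table and column names here are made up for illustration):

from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine

engine = create_engine("sqlite://")          # throwaway in-memory database
meta = MetaData()
terms = Table("terms", meta,
              Column("id", Integer, primary_key=True),
              Column("concept_path", String),
              Column("name_char", String))
meta.create_all(engine)

rows = [dict(id=i, concept_path=path, name_char=name)
        for i, (path, name) in enumerate([("\\i2b2\\a\\", "apples"),
                                          ("\\i2b2\\b\\", "bananas")])]
with engine.begin() as conn:
    conn.execute(terms.insert(), rows)       # executemany: one row per dict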
Example #2
 def has_table(self, connection, table_name, schema=None):
     # seems like case gets folded in pg_class...
     if schema is None:
         cursor = connection.execute(
             sql.text(
                 "select name "
                 "from sys.tables "
                 "where system = false "
                 "and type = 0 "
                 "and name=:name",
                 bindparams=[
                     sql.bindparam('name', util.text_type(table_name),
                                   type_=sqltypes.Unicode)]
             )
         )
     else:
         cursor = connection.execute(
             sql.text(
                 "SELECT tables.name "
                 "FROM sys.tables, sys.schemas "
                 "WHERE tables.system = FALSE "
                 "AND tables.schema_id = schemas.id "
                 "AND type = 0 "
                 "AND tables.name = :name "
                 "AND schemas.name = :schema",
                 bindparams=[
                     sql.bindparam('name',
                                   util.text_type(table_name),
                                   type_=sqltypes.Unicode),
                     sql.bindparam('schema',
                                   util.text_type(schema),
                                   type_=sqltypes.Unicode)]
             )
         )
     return bool(cursor.first())
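The bindparams= keyword argument to sql.text() used in this dialect snippet is the legacy signature, removed in SQLAlchemy 1.4. On newer releases the same typed parameter would presumably be attached with the bindparams() method, roughly:

from sqlalchemy import sql, types as sqltypes

stmt = sql.text(
    "select name from sys.tables "
    "where system = false and type = 0 and name = :name"
).bindparams(
    sql.bindparam("name", type_=sqltypes.Unicode)
)
# cursor = connection.execute(stmt, {"name": table_name})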
Example #3
    def get_table_oid(self, connection, table_name, schema=None, **kw):
        """Fetch the oid for schema.table_name.

        Several reflection methods require the table oid.  The idea for using
        this method is that it can be fetched one time and cached for
        subsequent calls.

        """
        table_oid = None
        if schema is not None:
            schema_where_clause = "n.nspname = :schema"
        else:
            schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)"
        query = """
            SELECT c.oid
            FROM pg_catalog.pg_class c
            LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
            WHERE (%s)
            AND c.relname = :table_name AND c.relkind in ('r', 'v', 'f')
        """ % schema_where_clause
        # Since we're binding to unicode, table_name and schema_name must be
        # unicode.
        table_name = str(table_name)
        bindparams = [sql.bindparam('table_name', type_=sqltypes.Unicode)]
        if schema is not None:
            schema = str(schema)
            bindparams.append(sql.bindparam('schema', type_=sqltypes.Unicode))
        s = sql.text(
            query, bindparams=bindparams, typemap={'oid': sqltypes.Integer})
        c = connection.execute(s, table_name=table_name, schema=schema)
        table_oid = c.scalar()
        if table_oid is None:
            raise exc.NoSuchTableError(table_name)
        return table_oid
Example #4
    def _save_concepts(cls, concepts):
        r'''Prepare to save concepts in a temporary table.

        >>> concepts = dict(
        ...     names=['apples', 'bananas', 'cherries'],
        ...     keys=[r'\\tk\a', r'\\tk\b', r'\\tk\c'])
        >>> tmp, ins, bind = DataExtract._save_concepts(concepts)

        >>> print tmp.delete()
        DELETE FROM global_temp_fact_param_table

        >>> print ins
        ... # doctest: +NORMALIZE_WHITESPACE
        INSERT INTO global_temp_fact_param_table (char_param1, char_param2)
        VALUES (:path, :name)

        >>> sorted(bind[0].keys())
        ['name', 'path']

        >>> sorted(bind[0].values())
        ['\\a', 'apples']

        '''
        names = concepts['names']
        paths = I2B2MetaData.keys_to_paths(concepts['keys'])
        bind = [dict(name=name,
                     path=path)
                for (path, name) in zip(paths, names)]
        tmp = i2b2_star.t_global_temp_fact_param_table
        ins = tmp.insert().values(char_param1=bindparam('path'),
                                  char_param2=bindparam('name'))
        return tmp, ins, bind
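The method only prepares the statements; a caller would presumably clear the temp table and then run the insert with the bind list, along these lines (the engine/connection names are hypothetical, and tmp, ins, bind come from the return value above):

# hypothetical execution of the (tmp, ins, bind) triple returned above
with engine.begin() as conn:
    conn.execute(tmp.delete())   # DELETE FROM global_temp_fact_param_table
    conn.execute(ins, bind)      # executemany: one row per dict in bind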
Example #5
    def init(self, session, api):
        self.session = session
        self.api = api

        # compile queries
        self.command_queries = {}
        self.command_queries['character'] = \
            session.query(Command).\
            filter(Command.actor_id == bindparam('actor_id'))

        self.command_queries['children'] = \
            session.query(Actor, Command).\
            filter(Actor.id == bindparam('actor_id')).\
            join(Actor.parent, aliased=True).\
            filter(Command.actor_id == Actor.id)

        location = aliased(Actor)
        self.command_queries['location'] = \
            session.query(Command).\
            join(location).\
            join(Actor, location.id == Actor.parent_id).\
            filter(Actor.id == bindparam('actor_id'))

        location = aliased(Actor)
        children = aliased(Actor)
        self.command_queries['location_children'] = \
            session.query(Command).\
            join(children).\
            join(location, location.id == children.parent_id).\
            join(Actor, location.id == Actor.parent_id).\
            filter(Actor.id == bindparam('actor_id')).\
            filter(Command.actor_id == children.id)

        self.command_precedence = ['character', 'children', 'location', 'location_children']
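These queries are compiled once and parameterised later; with the ORM, the actor id would presumably be supplied at call time through Query.params(), e.g.:

# hypothetical call site: bind actor_id when the prepared query is used
commands = self.command_queries['character'].params(actor_id=actor.id).all()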
Example #6
    def get(self, database, query):
        bs = block_size.c.block_size
        stmt = powa_getstatdata_detailed_db()
        stmt = stmt.where(
            (column("datname") == bindparam("database")) &
            (column("queryid") == bindparam("query")))
        stmt = stmt.alias()
        from_clause = outerjoin(powa_statements, stmt,
                           and_(powa_statements.c.queryid == stmt.c.queryid, powa_statements.c.dbid == stmt.c.dbid))
        c = stmt.c
        rblk = mulblock(sum(c.shared_blks_read).label("shared_blks_read"))
        wblk = mulblock(sum(c.shared_blks_hit).label("shared_blks_hit"))
        stmt = (select([
            column("query"),
            sum(c.calls).label("calls"),
            sum(c.runtime).label("runtime"),
            rblk,
            wblk,
            (rblk + wblk).label("total_blks")])
            .select_from(from_clause)
            .where(powa_statements.c.queryid == bindparam("query"))
            .group_by(column("query"), bs))

        value = self.execute(stmt, params={
            "query": query,
            "database": database,
            "from": self.get_argument("from"),
            "to": self.get_argument("to")
        })
        if value.rowcount < 1:
            self.render("xhr.html", content="No data")
            return
        self.render("database/query/detail.html", stats=value.first())
Example #7
File: base.py Project: advatar/thevault
 def has_table(self, connection, table_name, schema=None):
     # seems like case gets folded in pg_class...
     if schema is None:
         cursor = connection.execute(
             sql.text(
                 "select relname from pg_class c join pg_namespace n on "
                 "n.oid=c.relnamespace where n.nspname=current_schema() and "
                 "lower(relname)=:name",
                 bindparams=[
                     sql.bindparam('name', unicode(table_name.lower()),
                                   type_=sqltypes.Unicode)]
             )
         )
     else:
         cursor = connection.execute(
             sql.text(
                 "select relname from pg_class c join pg_namespace n on "
                 "n.oid=c.relnamespace where n.nspname=:schema and "
                 "lower(relname)=:name",
                 bindparams=[
                     sql.bindparam('name', unicode(table_name.lower()),
                                   type_=sqltypes.Unicode),
                     sql.bindparam('schema', unicode(schema),
                                   type_=sqltypes.Unicode)]
             )
         )
     return bool(cursor.first())
Example #8
def get_query(name, user_id=None):
    """Get the named pre-built query, sharding on user_id if given.

    This is a helper function to return an appropriate pre-built SQL query
    while taking sharding of the WBO table into account.  Call it with the
    name of the query and optionally the user_id on which to shard.
    """
    if user_id is None:
        table = wbo
    else:
        table = get_wbo_table(user_id)

    queries['ITEM_ID_COL_USER'] = and_(
        table.c.collection == bindparam('collection_id'),
        table.c.username == bindparam('user_id'),
        table.c.id == bindparam('item_id'),
        table.c.ttl > bindparam('ttl'))

    query = queries.get(name)
    if query is None:
        raise ValueError(name)

    if isinstance(query, str):
        if '%(wbo)s' in query:
            query = query % {'wbo': table.name}
        query = text(query)

    return query
Example #9
    def __init__(self, config):
        self._orm_engine = engine_from_config(config["database"]["orm"], prefix="")

        metadata = MetaData()

        aa = models.create_alarm_action_model(metadata).alias("aa")
        nm = models.create_notification_method_model(metadata).alias("nm")
        nmt = models.create_notification_method_type_model(metadata).alias("nmt")
        a = models.create_alarm_model(metadata).alias("a")

        self._orm_query = (
            select([nm.c.id, nm.c.type, nm.c.name, nm.c.address, nm.c.period])
            .select_from(aa.join(nm, aa.c.action_id == nm.c.id))
            .where(
                and_(
                    aa.c.alarm_definition_id == bindparam("alarm_definition_id"),
                    aa.c.alarm_state == bindparam("alarm_state"),
                )
            )
        )

        self._orm_get_alarm_state = select([a.c.state]).where(a.c.id == bindparam("alarm_id"))

        self._orm_nmt_query = select([nmt.c.name])

        self._orm_get_notification = select([nm.c.name, nm.c.type, nm.c.address, nm.c.period]).where(
            nm.c.id == bindparam("notification_id")
        )

        self._orm = None
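The statements above only declare bindparam() placeholders; the values are supplied when they run. A hedged sketch of how this class might execute them later (the variable names used for the parameter values are placeholders):

# hypothetical execution of the prepared statements
with self._orm_engine.connect() as conn:
    actions = conn.execute(
        self._orm_query,
        {"alarm_definition_id": alarm_definition_id, "alarm_state": "ALARM"},
    ).fetchall()
    state = conn.execute(
        self._orm_get_alarm_state, {"alarm_id": alarm_id},
    ).scalar()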
Example #10
File: base.py Project: advatar/thevault
 def has_type(self, connection, type_name, schema=None):
     bindparams = [
         sql.bindparam('typname',
             unicode(type_name), type_=sqltypes.Unicode),
         sql.bindparam('nspname',
             unicode(schema), type_=sqltypes.Unicode),
         ]
     if schema is not None:
         query = """
         SELECT EXISTS (
             SELECT * FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n
             WHERE t.typnamespace = n.oid
             AND t.typname = :typname
             AND n.nspname = :nspname
             )
             """
     else:
         query = """
         SELECT EXISTS (
             SELECT * FROM pg_catalog.pg_type t
             WHERE t.typname = :typname
             AND pg_type_is_visible(t.oid)
             )
             """
     cursor = connection.execute(sql.text(query, bindparams=bindparams))
     return bool(cursor.scalar())
Example #11
File: base.py Project: advatar/thevault
    def has_sequence(self, connection, sequence_name, schema=None):
        if schema is None:
            cursor = connection.execute(
                sql.text(
                    "SELECT relname FROM pg_class c join pg_namespace n on "
                    "n.oid=c.relnamespace where relkind='S' and "
                    "n.nspname=current_schema() "
                    "and lower(relname)=:name",
                    bindparams=[
                        sql.bindparam('name', unicode(sequence_name.lower()),
                        type_=sqltypes.Unicode)
                    ] 
                )
            )
        else:
            cursor = connection.execute(
                sql.text(
                    "SELECT relname FROM pg_class c join pg_namespace n on "
                    "n.oid=c.relnamespace where relkind='S' and "
                    "n.nspname=:schema and lower(relname)=:name",
                    bindparams=[
                        sql.bindparam('name', unicode(sequence_name.lower()),
                                      type_=sqltypes.Unicode),
                        sql.bindparam('schema', unicode(schema),
                                      type_=sqltypes.Unicode)
                    ]
                )
            )

        return bool(cursor.first())
Example #12
    def __init__(self):
        self._orm_engine = engine_from_config({
            'url': CONF.orm.url
        }, prefix='')

        metadata = MetaData()

        aa = models.create_alarm_action_model(metadata).alias('aa')
        nm = models.create_notification_method_model(metadata).alias('nm')
        nmt_insert = models.create_notification_method_type_model(metadata)
        nmt = nmt_insert.alias('nmt')
        a = models.create_alarm_model(metadata).alias('a')

        self._orm_query = select([nm.c.id, nm.c.type, nm.c.name, nm.c.address, nm.c.period])\
            .select_from(aa.join(nm, aa.c.action_id == nm.c.id))\
            .where(
                and_(aa.c.alarm_definition_id == bindparam('alarm_definition_id'),
                     aa.c.alarm_state == bindparam('alarm_state')))

        self._orm_get_alarm_state = select([a.c.state]).where(a.c.id == bindparam('alarm_id'))

        self._orm_nmt_query = select([nmt.c.name])

        self._orm_get_notification = select([nm.c.name, nm.c.type, nm.c.address, nm.c.period])\
            .where(nm.c.id == bindparam('notification_id'))

        self._orm_add_notification_type = insert(nmt_insert).values(name=bindparam('b_name'))

        self._orm = None
Example #13
def gdft_xhr(request):
    if request.method == 'POST':
        form = GenDev_FailType(request.POST)
        if form.is_valid():
            conn = engine.connect()

            d = select([device.brand_name, func.count(device.MDR_report_key),
                problem_code.problem_description, master_record.manufacturer_name],
                and_(
                    device.MDR_report_key==master_record.MDR_report_key,
                    master_record.event_type==bindparam("event_type"),
                    device_problem.MDR_report_key==master_record.MDR_report_key,
                    device_problem.problem_code_key==problem_code.device_problem_code,
                    device.generic_name.contains(bindparam("generic_name")),
                    master_record.date_recieved > bindparam("date_after"),
                    )).group_by(device_problem.problem_code_key)

            res = conn.execute(d, generic_name=form.cleaned_data["gendev"], event_type=form.cleaned_data["failtype"],
                    date_after=form.cleaned_data["date_after"])
            li = res.fetchall()
            li = sorted(li, key=lambda x: x[1], reverse=True)
            s= ""
            for i in li:
                s = s + str(i) + "<br>"
            return HttpResponse(s)

    return HttpResponse("Error in form")
Example #14
 def get(self, database, query, qual):
     stmt = qualstat_getstatdata()
     c = inner_cc(stmt)
     stmt = stmt.alias()
     stmt = (stmt.select()
         .where((c.qualid == bindparam("qualid")))
         .where(stmt.c.occurences > 0)
         .column((c.queryid == bindparam("query")).label("is_my_query")))
     quals = list(self.execute(
         stmt,
         params={"query": query,
                 "from": self.get_argument("from"),
                 "to": self.get_argument("to"),
                 "qualid": qual}))
     my_qual = None
     other_queries = {}
     for qual in quals:
         if qual['is_my_query']:
             my_qual = resolve_quals(self.connect(database=database),
                                     [qual])[0]
         else:
             other_queries[qual['queryid']] = qual['query']
     if my_qual is None:
         self.render("xhr.html", content="nodata")
         return
     self.render("database/query/qualdetail.html",
                 qual=my_qual,
                 database=database,
                 other_queries=other_queries)
Example #15
    def test_cloned_alias(self):
        entity = table(
            "entity", column("id"), column("employer_id"), column("name")
        )
        tag = table("tag", column("tag"), column("entity_id"))

        tags = (
            select([tag.c.entity_id, func.array_agg(tag.c.tag).label("tags")])
            .group_by(tag.c.entity_id)
            .cte("unaliased_tags")
        )

        entity_tags = tags.alias(name="entity_tags")
        employer_tags = tags.alias(name="employer_tags")

        q = (
            select([entity.c.name])
            .select_from(
                entity.outerjoin(
                    entity_tags, tags.c.entity_id == entity.c.id
                ).outerjoin(
                    employer_tags, tags.c.entity_id == entity.c.employer_id
                )
            )
            .where(entity_tags.c.tags.op("@>")(bindparam("tags")))
            .where(employer_tags.c.tags.op("@>")(bindparam("tags")))
        )

        self.assert_compile(
            q,
            "WITH unaliased_tags AS "
            "(SELECT tag.entity_id AS entity_id, array_agg(tag.tag) AS tags "
            "FROM tag GROUP BY tag.entity_id)"
            " SELECT entity.name "
            "FROM entity "
            "LEFT OUTER JOIN unaliased_tags AS entity_tags ON "
            "unaliased_tags.entity_id = entity.id "
            "LEFT OUTER JOIN unaliased_tags AS employer_tags ON "
            "unaliased_tags.entity_id = entity.employer_id "
            "WHERE (entity_tags.tags @> :tags) AND "
            "(employer_tags.tags @> :tags)",
        )

        cloned = q.params(tags=["tag1", "tag2"])
        self.assert_compile(
            cloned,
            "WITH unaliased_tags AS "
            "(SELECT tag.entity_id AS entity_id, array_agg(tag.tag) AS tags "
            "FROM tag GROUP BY tag.entity_id)"
            " SELECT entity.name "
            "FROM entity "
            "LEFT OUTER JOIN unaliased_tags AS entity_tags ON "
            "unaliased_tags.entity_id = entity.id "
            "LEFT OUTER JOIN unaliased_tags AS employer_tags ON "
            "unaliased_tags.entity_id = entity.employer_id "
            "WHERE (entity_tags.tags @> :tags) AND "
            "(employer_tags.tags @> :tags)",
        )
Example #16
    def delete_items(self, user_id, collection_name, item_ids=None,
                     filters=None, limit=None, offset=None, sort=None,
                     storage_time=None):
        """Deletes items. All items are removed unless item_ids is provided"""
        collection_id = self._get_collection_id(user_id, collection_name,
                                                create=False)
        if collection_id is None:
            return False

        wbo = self._get_wbo_table(user_id)
        query = _delete(wbo)
        where = [wbo.c.username == bindparam('user_id'),
                 wbo.c.collection == bindparam('collection_id')]

        if item_ids is not None:
            where.append(wbo.c.id.in_(item_ids))

        if filters is not None:
            for field, value in filters.items():
                field = getattr(wbo.c, field)

                operator, value = value
                if field.name == 'modified':
                    value = _roundedbigint(value)
                if isinstance(value, (list, tuple)):
                    where.append(field.in_(value))
                else:
                    if operator == '=':
                        where.append(field == value)
                    elif operator == '<':
                        where.append(field < value)
                    elif operator == '>':
                        where.append(field > value)

        where = and_(*where)
        query = query.where(where)

        if self.engine_name != 'sqlite':
            if sort is not None:
                if sort == 'oldest':
                    query = query.order_by(wbo.c.modified.asc())
                elif sort == 'newest':
                    query = query.order_by(wbo.c.modified.desc())
                else:
                    query = query.order_by(wbo.c.sortindex.desc())

            if limit is not None and int(limit) > 0:
                query = query.limit(int(limit))

            if offset is not None and int(offset) > 0:
                query = query.offset(int(offset))

        # XXX see if we want to send back more details
        # e.g. by checking the rowcount
        rowcount = self._do_query(query, user_id=user_id,
                                  collection_id=collection_id)
        return rowcount > 0
Example #17
def between(column):
    return sql.and_(
        sql.or_(
            column >= sql.bindparam("start_date"),
            sql.bindparam("start_date") == None),
        sql.or_(
            column <= sql.bindparam("end_date"),
            sql.bindparam("end_date") == None),
        )
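Because each bound endpoint is OR-ed with an IS NULL check on the same parameter, either side of the range can be disabled by binding None. A hedged usage sketch, where the events table and conn connection are hypothetical:

import datetime

# passing start_date=None leaves the lower bound open: ":start_date IS NULL"
# evaluates true, so only the end_date comparison filters rows
stmt = events.select().where(between(events.c.created_at))
rows = conn.execute(stmt, {"start_date": None,
                           "end_date": datetime.date(2020, 1, 1)}).fetchall()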
Example #18
def upgrade():
    op.add_column('thumbnail', sa.Column('width', sa.Integer(), nullable=True))
    op.add_column('thumbnail', sa.Column('height', sa.Integer(), nullable=True))
    
    connection = op.get_bind()
    tn = Thumbnail.__table__
    result = connection.execute(select([column('id'), column('size')], from_obj=tn))
    w_h = [dict(tnid=r.id, w=int(r.size.split('x')[0]), h=int(r.size.split('x')[1])) for r in result]
    updt_stmt = tn.update().where(tn.c.id == bindparam('tnid')).values(width=bindparam('w'), height=bindparam('h'))
    connection.execute(updt_stmt, w_h)
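A self-contained version of the same migration pattern (in-memory SQLite, made-up rows), showing the executemany UPDATE driven by bindparam(); the bind names are kept distinct from the column names, presumably to avoid clashing with the parameter names the UPDATE itself generates:

from sqlalchemy import (Column, Integer, MetaData, String, Table, bindparam,
                        create_engine)

engine = create_engine("sqlite://")              # throwaway in-memory database
meta = MetaData()
tn = Table("thumbnail", meta,
           Column("id", Integer, primary_key=True),
           Column("size", String),
           Column("width", Integer),
           Column("height", Integer))
meta.create_all(engine)

with engine.begin() as conn:
    conn.execute(tn.insert(), [{"id": 1, "size": "640x480"},
                               {"id": 2, "size": "800x600"}])
    rows = conn.execute(tn.select()).fetchall()
    w_h = [dict(tnid=r.id,
                w=int(r.size.split("x")[0]),
                h=int(r.size.split("x")[1])) for r in rows]
    updt_stmt = (tn.update()
                 .where(tn.c.id == bindparam("tnid"))
                 .values(width=bindparam("w"), height=bindparam("h")))
    conn.execute(updt_stmt, w_h)                 # one UPDATE per dict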
Example #19
    def get_indexes(self, connection, tablename, schema=None, **kw):
        # using system catalogs, don't support index reflection
        # below MS 2005
        if self.server_version_info < MS_2005_VERSION:
            return []

        current_schema = schema or self.default_schema_name
        full_tname = "%s.%s" % (current_schema, tablename)

        rp = connection.execute(
            sql.text("select ind.index_id, ind.is_unique, ind.name "
                "from sys.indexes as ind join sys.tables as tab on "
                "ind.object_id=tab.object_id "
                "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                "where tab.name = :tabname "
                "and sch.name=:schname "
                "and ind.is_primary_key=0", 
                bindparams=[
                    sql.bindparam('tabname', tablename, 
                                    sqltypes.String(convert_unicode=True)),
                    sql.bindparam('schname', current_schema, 
                                    sqltypes.String(convert_unicode=True))
                ]
            )
        )
        indexes = {}
        for row in rp:
            indexes[row['index_id']] = {
                'name':row['name'],
                'unique':row['is_unique'] == 1,
                'column_names':[]
            }
        rp = connection.execute(
            sql.text(
                "select ind_col.index_id, ind_col.object_id, col.name "
                "from sys.columns as col "
                "join sys.tables as tab on tab.object_id=col.object_id "
                "join sys.index_columns as ind_col on "
                "(ind_col.column_id=col.column_id and "
                "ind_col.object_id=tab.object_id) "
                "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                "where tab.name=:tabname "
                "and sch.name=:schname",
                        bindparams=[
                            sql.bindparam('tabname', tablename, 
                                    sqltypes.String(convert_unicode=True)),
                            sql.bindparam('schname', current_schema, 
                                    sqltypes.String(convert_unicode=True))
                        ]),
            )
        for row in rp:
            if row['index_id'] in indexes:
                indexes[row['index_id']]['column_names'].append(row['name'])

        return indexes.values()
Example #20
    def test_compare_binds(self):
        b1 = bindparam("foo", type_=Integer())
        b2 = bindparam("foo", type_=Integer())
        b3 = bindparam("bar", type_=Integer())
        b4 = bindparam("foo", type_=String())

        def c1():
            return 5

        def c2():
            return 6

        b5 = bindparam("foo", type_=Integer(), callable_=c1)
        b6 = bindparam("foo", type_=Integer(), callable_=c2)
        b7 = bindparam("foo", type_=Integer(), callable_=c1)

        b8 = bindparam("foo", type_=Integer, value=5)
        b9 = bindparam("foo", type_=Integer, value=6)

        is_false(b1.compare(b5))
        is_true(b5.compare(b7))
        is_false(b5.compare(b6))
        is_true(b1.compare(b2))

        # currently not comparing "key", as we often have to compare
        # anonymous names.  however we should really check for that
        # is_true(b1.compare(b3))

        is_false(b1.compare(b4))
        is_false(b1.compare(b8))
        is_false(b8.compare(b9))
        is_true(b8.compare(b8))
Example #21
    def test_cloned_alias(self):
        entity = table(
            'entity', column('id'), column('employer_id'), column('name'))
        tag = table('tag', column('tag'), column('entity_id'))

        tags = select([
            tag.c.entity_id,
            func.array_agg(tag.c.tag).label('tags'),
        ]).group_by(tag.c.entity_id).cte('unaliased_tags')

        entity_tags = tags.alias(name='entity_tags')
        employer_tags = tags.alias(name='employer_tags')

        q = (
            select([entity.c.name])
            .select_from(
                entity
                .outerjoin(entity_tags, tags.c.entity_id == entity.c.id)
                .outerjoin(employer_tags,
                           tags.c.entity_id == entity.c.employer_id)
            )
            .where(entity_tags.c.tags.op('@>')(bindparam('tags')))
            .where(employer_tags.c.tags.op('@>')(bindparam('tags')))
        )

        self.assert_compile(
            q,
            'WITH unaliased_tags AS '
            '(SELECT tag.entity_id AS entity_id, array_agg(tag.tag) AS tags '
            'FROM tag GROUP BY tag.entity_id)'
            ' SELECT entity.name '
            'FROM entity '
            'LEFT OUTER JOIN unaliased_tags AS entity_tags ON '
            'unaliased_tags.entity_id = entity.id '
            'LEFT OUTER JOIN unaliased_tags AS employer_tags ON '
            'unaliased_tags.entity_id = entity.employer_id '
            'WHERE (entity_tags.tags @> :tags) AND '
            '(employer_tags.tags @> :tags)'
        )

        cloned = q.params(tags=['tag1', 'tag2'])
        self.assert_compile(
            cloned,
            'WITH unaliased_tags AS '
            '(SELECT tag.entity_id AS entity_id, array_agg(tag.tag) AS tags '
            'FROM tag GROUP BY tag.entity_id)'
            ' SELECT entity.name '
            'FROM entity '
            'LEFT OUTER JOIN unaliased_tags AS entity_tags ON '
            'unaliased_tags.entity_id = entity.id '
            'LEFT OUTER JOIN unaliased_tags AS employer_tags ON '
            'unaliased_tags.entity_id = entity.employer_id '
            'WHERE (entity_tags.tags @> :tags) AND '
            '(employer_tags.tags @> :tags)')
Example #22
    def delete_stmt():
        clause = sql.and_()
        for col in mapper._pks_by_table[table]:
            clause.clauses.append(col == sql.bindparam(col.key, type_=col.type))

        if need_version_id:
            clause.clauses.append(
                mapper.version_id_col == sql.bindparam(mapper.version_id_col.key, type_=mapper.version_id_col.type)
            )

        return table.delete(clause)
Example #23
    def _get_colparams(self, stmt, default_params):
        """Organize ``UPDATE``/``INSERT`` ``SET``/``VALUES`` parameters into a list of tuples.

        Each tuple will contain the ``Column`` and a ``ClauseElement``
        representing the value to be set (usually a ``_BindParamClause``,
        but could also be other SQL expressions.)

        The list of tuples will determine the columns that are
        actually rendered into the ``SET``/``VALUES`` clause of the
        rendered ``UPDATE``/``INSERT`` statement.  It will also
        determine how to generate the list/dictionary of bind
        parameters at execution time (i.e. ``get_params()``).

        This list takes into account the `values` keyword specified
        to the statement, the parameters sent to this Compiled
        instance, and the default bind parameter values corresponding
        to the dialect's behavior for otherwise unspecified primary
        key columns.
        """

        # no parameters in the statement, no parameters in the
        # compiled params - return binds for all columns
        if self.parameters is None and stmt.parameters is None:
            return [(c, sql.bindparam(c.key, type=c.type)) for c in stmt.table.columns]

        def to_col(key):
            if not isinstance(key, sql._ColumnClause):
                return stmt.table.columns.get(unicode(key), key)
            else:
                return key

        # if we have statement parameters - set defaults in the
        # compiled params
        if self.parameters is None:
            parameters = {}
        else:
            parameters = dict([(to_col(k), v) for k, v in self.parameters.iteritems()])

        if stmt.parameters is not None:
            for k, v in stmt.parameters.iteritems():
                parameters.setdefault(to_col(k), v)

        for k, v in default_params.iteritems():
            parameters.setdefault(to_col(k), v)

        # create a list of column assignment clauses as tuples
        values = []
        for c in stmt.table.columns:
            if parameters.has_key(c):
                value = parameters[c]
                if sql._is_literal(value):
                    value = sql.bindparam(c.key, value, type=c.type, unique=True)
                values.append((c, value))
        return values
Example #24
    def update_stmt():
        clause = sql.and_()

        for col in mapper._pks_by_table[table]:
            clause.clauses.append(col == sql.bindparam(col._label,
                                            type_=col.type))

        if needs_version_id:
            clause.clauses.append(mapper.version_id_col ==\
                    sql.bindparam(mapper.version_id_col._label,
                                    type_=col.type))

        return table.update(clause)
Example #25
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('stored_file', sa.Column('mimetype', sa.Unicode(length=32), nullable=False,
            server_default=sa.text("''")))
    op.alter_column('stored_file', 'mimetype', server_default=None)
    ### end Alembic commands ###

    connection = op.get_bind()
    sf = StoredFile.__table__
    result = connection.execute(select([sf.c.id, sf.c.title]))
    mimetypes = [dict(sfid=r[0], mimetype=guess_type(r[1])[0]) for r in result]
    if len(mimetypes) > 0:
        updt_stmt = sf.update().where(sf.c.id == bindparam('sfid')).values(mimetype=bindparam('mimetype'))
        connection.execute(updt_stmt, mimetypes)
Example #26
 def _initialize_queries(self):
     #        self._sql_get_dist = select([Distance.__table__.c.dist],
     #               or_(and_(Distance.__table__.c._minimum1_id==bindparam("id1"), 
     #                        Distance.__table__.c._minimum2_id==bindparam("id2")),
     #                   and_(Distance.__table__.c._minimum1_id==bindparam("id2"), 
     #                        Distance.__table__.c._minimum2_id==bindparam("id1")),
     #               ), use_labels=False).limit(1)
     tbl =  Distance.__table__.c
     self._sql_get_dist = select([tbl.dist],and_(
                              tbl._minimum1_id==bindparam("id1"), 
                              tbl._minimum2_id==bindparam("id2")
                              ), use_labels=False).limit(1)
                              
     #self._sql_set_dist = Distance.__table__.insert().values(_minimum1_id=bindparam("id1"),_minimum2_id=bindparam("id2"), dist=bindparam("dist"))
     self._sql_set_dist = "INSERT OR REPLACE INTO tbl_distances (dist, _minimum1_id, _minimum2_id) VALUES (:dist, :id1, :id2)"
Example #27
    def __init__(self, config):
        self._orm_engine = engine_from_config(config['database']['orm'], prefix='')

        metadata = MetaData()

        aa = models.create_alarm_action_model(metadata).alias('aa')
        nm = models.create_notification_method_model(metadata).alias('nm')

        self._orm_query = select([nm.c.name, nm.c.type, nm.c.address])\
            .select_from(aa.join(nm, aa.c.action_id == nm.c.id))\
            .where(
                and_(aa.c.alarm_definition_id == bindparam('alarm_definition_id'),
                     aa.c.alarm_state == bindparam('alarm_state')))

        self._orm = None
Example #28
        def visit_binary(binary):
            leftcol = binary.left
            rightcol = binary.right

            equated_columns[rightcol] = leftcol
            equated_columns[leftcol] = rightcol

            if should_bind(leftcol, rightcol):
                if leftcol not in binds:
                    binds[leftcol] = sql.bindparam(None, None, type_=binary.right.type)
                binary.left = binds[leftcol]
            elif should_bind(rightcol, leftcol):
                if rightcol not in binds:
                    binds[rightcol] = sql.bindparam(None, None, type_=binary.left.type)
                binary.right = binds[rightcol]
Example #29
    def returning_clause(self, stmt, returning_cols):
        self.returning_cols = list(
            expression._select_iterables(returning_cols))

        # within_columns_clause=False so that labels (foo AS bar) don't render
        columns = [
            self.process(c,
                         within_columns_clause=False,
                         result_map=self.result_map)
            for c in self.returning_cols
        ]

        if not hasattr(self, 'returning_parameters'):
            self.returning_parameters = []

        binds = []
        for i, col in enumerate(self.returning_cols):
            dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(
                self.dialect.dbapi)
            self.returning_parameters.append((i + 1, dbtype))

            bindparam = sql.bindparam("ret_%d" % i,
                                      value=ReturningParam(dbtype))
            self.binds[bindparam.key] = bindparam
            binds.append(
                self.bindparam_string(self._truncate_bindparam(bindparam)))

        return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
Example #30
    def delete_stmt():
        clause = sql.and_()
        for col in mapper._pks_by_table[table]:
            clause.clauses.append(
                    col == sql.bindparam(col.key, type_=col.type))

        if need_version_id:
            clause.clauses.append(
                mapper.version_id_col == 
                sql.bindparam(
                        mapper.version_id_col.key, 
                        type_=mapper.version_id_col.type
                )
            )

        return table.delete(clause)
Example #31
def dimension_id_lookup_func(value, dim_table_name, key_field_name,
                             value_field_name, **kwargs):
    if not value:
        return None

    else:
        db_schema = 'olap'
        raw_template = """
            SELECT {field}
            from {schema}.{table}
            where {dim_table_value_field_name} = :source_value""".format(
            schema=db_schema,
            field=key_field_name,
            table=dim_table_name,
            dim_table_value_field_name=value_field_name)
        template = text(raw_template)
        stmt = template.bindparams(bindparam('source_value', type_=String))

        data_mgr = kwargs['persistence_manager']
        dbconnection = data_mgr.database.engine.connect()
        # db_session = data_mgr.getSession()

        result = dbconnection.execute(stmt, {"source_value": value})
        record = result.fetchone()
        if not record:
            raise Exception(
                'returned empty result set from query: %s where value is %s' %
                (str(stmt), value))

        return record[0]
Example #32
File: qual.py Project: maito26/powa-web
 def query(self):
     query = (qual_constants("most_used",
                             text("""
         datname = :database AND
         s.queryid = :query AND
         qn.qualid = :qual AND
         coalesce_range && tstzrange(:from, :to)"""),
                             top=10))
     base = qualstat_getstatdata()
     c = inner_cc(base)
     base = base.where(c.queryid == bindparam("query")).alias()
     totals = (
         base.select().where((c.qualid == bindparam("qual"))
                             & (c.queryid == bindparam("query")))).alias()
     return (query.alias().select().column(
         totals.c.occurences.label('total_occurences')).correlate(query))
Example #33
    def __init__(self, config):
        self._orm_engine = engine_from_config(config['database']['orm'],
                                              prefix='')

        metadata = MetaData()

        aa = models.create_alarm_action_model(metadata).alias('aa')
        nm = models.create_notification_method_model(metadata).alias('nm')

        self._orm_query = select([nm.c.name, nm.c.type, nm.c.address])\
            .select_from(aa.join(nm, aa.c.action_id == nm.c.id))\
            .where(
                and_(aa.c.alarm_definition_id == bindparam('alarm_definition_id'),
                     aa.c.alarm_state == bindparam('alarm_state')))

        self._orm = None
Example #34
    def get_table_oid(self, connection, table_name, schema=None, **kw):
        """Fetch the oid for schema.table_name.

        Several reflection methods require the table oid.  The idea for using
        this method is that it can be fetched one time and cached for
        subsequent calls.

        """
        table_oid = None
        if schema is not None:
            schema_where_clause = "n.nspname = :schema"
        else:
            schema_where_clause = "sys_catalog.sys_table_is_visible(c.oid)"
        query = ("""
                SELECT c.oid
                FROM sys_catalog.sys_class c
                LEFT JOIN sys_catalog.sys_namespace n ON n.oid = c.relnamespace
                WHERE (%s)
                AND c.relname = :table_name AND c.relkind in
                ('r', 'v', 'm', 'f', 'p')
            """ % schema_where_clause)
        # Since we're binding to unicode, table_name and schema_name must be
        # unicode.
        table_name = util.text_type(table_name)
        if schema is not None:
            schema = util.text_type(schema)
        s = sql.text(query).bindparams(
            sql.bindparam("table_name", type_=sqltypes.Unicode))
        s = s.columns(oid=sqltypes.Integer)
        if schema:
            s = s.bindparams(sql.bindparam("schema", type_=sqltypes.Unicode))
        c = connection.execute(s, table_name=table_name, schema=schema)
        table_oid = c.scalar()
        if table_oid is None:
            raise exc.NoSuchTableError(table_name)
        return table_oid
Example #35
    def associate(self, collection, refs):
        # Docstring inherited from Registry.associate.

        # Most SqlRegistry subclass implementations should replace this
        # implementation with special "UPSERT" or "MERGE" syntax.  This
        # implementation is only concurrency-safe for databases that implement
        # transactions with database- or table-wide locks (e.g. SQLite).

        datasetCollectionTable = self._schema.tables["dataset_collection"]
        insertQuery = datasetCollectionTable.insert()
        checkQuery = select([datasetCollectionTable.c.dataset_id], whereclause=and_(
            datasetCollectionTable.c.collection == collection,
            datasetCollectionTable.c.dataset_ref_hash == bindparam("hash")))

        for ref in refs:
            if ref.id is None:
                raise AmbiguousDatasetError(f"Cannot associate dataset {ref} without ID.")

            try:
                with self.transaction():
                    self._connection.execute(insertQuery, {"dataset_id": ref.id, "dataset_ref_hash": ref.hash,
                                                           "collection": collection})
            except IntegrityError as exc:
                # Did we clash with a completely duplicate entry (because this
                # dataset is already in this collection)?  Or is there already
                # a different dataset with the same DatasetType and data ID in
                # this collection?  Only the latter is an error.
                row = self._connection.execute(checkQuery, hash=ref.hash).fetchone()
                if row.dataset_id != ref.id:
                    raise ConflictingDefinitionError(
                        "A dataset of type {} with id: {} already exists in collection {}".format(
                            ref.datasetType, ref.dataId, collection
                        )
                    ) from exc
            self.associate(collection, ref.components.values())
Example #36
 def delete_obj(self, objects, uow):
     """called by a UnitOfWork object to delete objects, which involves a
     DELETE statement for each table used by this mapper, for each object in the list."""
     for table in self.tables:
         if not self._has_pks(table):
             continue
         delete = []
         for obj in objects:
             params = {}
             if not hasattr(obj, "_instance_key"):
                 continue
             else:
                 delete.append(params)
             for col in self.pks_by_table[table]:
                 params[col.key] = self._getattrbycolumn(obj, col)
             self.extension.before_delete(self, obj)
         if len(delete):
             clause = sql.and_()
             for col in self.pks_by_table[table]:
                 clause.clauses.append(col == sql.bindparam(col.key))
             statement = table.delete(clause)
             c = statement.execute(*delete)
             if (table.engine.supports_sane_rowcount()
                     and c.rowcount != len(delete)):
                 raise Exception(
                     "ConcurrencyError - updated rowcount %d does not match "
                     "number of objects updated %d"
                     % (c.cursor.rowcount, len(delete)))
Example #37
 def query(self):
     # Working from the statdata detailed_db base query
     inner_query = powa_getstatdata_detailed_db()
     inner_query = inner_query.alias()
     c = inner_query.c
     ps = powa_statements
     # Multiply each measure by the size of one block.
     columns = [
         c.queryid, ps.c.query,
         sum(c.calls).label("calls"),
         sum(c.runtime).label("runtime"),
         sum(mulblock(c.shared_blks_read)).label("shared_blks_read"),
         sum(mulblock(c.shared_blks_hit)).label("shared_blks_hit"),
         sum(mulblock(c.shared_blks_dirtied)).label("shared_blks_dirtied"),
         sum(mulblock(c.shared_blks_written)).label("shared_blks_written"),
         sum(mulblock(c.temp_blks_read)).label("temp_blks_read"),
         sum(mulblock(c.temp_blks_written)).label("temp_blks_written"),
         (sum(c.runtime) / greatest(sum(c.calls), 1)).label("avg_runtime"),
         sum(c.blk_read_time).label("blks_read_time"),
         sum(c.blk_write_time).label("blks_write_time")
     ]
     from_clause = inner_query.join(ps, (ps.c.queryid == c.queryid) &
                                    (ps.c.userid == c.userid) &
                                    (ps.c.dbid == c.dbid))
     return (select(columns).select_from(from_clause).where(
         c.datname == bindparam("database")).group_by(
             c.queryid, ps.c.query).order_by(sum(c.runtime).desc()))
Example #38
    def has_table(self, connection, tablename, schema=None):
        if schema is None:
            schema = self.default_schema_name

        result = connection.execute(
                    text("select sysobjects.name from sysobjects, sysusers "
                         "where sysobjects.uid=sysusers.uid and "
                         "sysobjects.name=:tablename and "
                         "sysusers.name=:schemaname and "
                         "sysobjects.type='U'",
                         bindparams=[
                                  bindparam('tablename', tablename),
                                  bindparam('schemaname', schema)
                                  ])
                 )
        return result.scalar() is not None
Example #39
    def __init__(self, class_or_mapper, session=None, entity_name=None, lockmode=None, with_options=None, extension=None, **kwargs):
        if isinstance(class_or_mapper, type):
            self.mapper = mapper.class_mapper(class_or_mapper, entity_name=entity_name)
        else:
            self.mapper = class_or_mapper.compile()
        self.with_options = with_options or []
        self.select_mapper = self.mapper.get_select_mapper().compile()
        self.always_refresh = kwargs.pop('always_refresh', self.mapper.always_refresh)
        self.lockmode = lockmode
        self.extension = mapper._ExtensionCarrier()
        if extension is not None:
            self.extension.append(extension)
        self.extension.append(self.mapper.extension)
        self.is_polymorphic = self.mapper is not self.select_mapper
        self._session = session
        if not hasattr(self.mapper, '_get_clause'):
            _get_clause = sql.and_()
            for primary_key in self.primary_key_columns:
                _get_clause.clauses.append(primary_key == sql.bindparam(primary_key._label, type=primary_key.type, unique=True))
            self.mapper._get_clause = _get_clause
            
        self._entities = []
        self._get_clause = self.mapper._get_clause

        self._order_by = kwargs.pop('order_by', False)
        self._group_by = kwargs.pop('group_by', False)
        self._distinct = kwargs.pop('distinct', False)
        self._offset = kwargs.pop('offset', None)
        self._limit = kwargs.pop('limit', None)
        self._criterion = None
        self._joinpoint = self.mapper
        self._from_obj = [self.table]

        for opt in util.flatten_iterator(self.with_options):
            opt.process_query(self)
Example #40
File: sql.py Project: KabyleAI/Search
def get_titles(article_ids, engine):
    """Get article titles from the SQL database.

    Parameters
    ----------
    article_ids : iterable of int
        An iterable of article IDs.
    engine : sqlalchemy.engine.Engine
        SQLAlchemy Engine connected to the database.

    Returns
    -------
    titles : dict
        Dictionary mapping article IDs to the article titles.
    """
    if len(article_ids) == 0:
        return {}

    query = sql.text("""SELECT article_id, title
        FROM articles
        WHERE article_id IN :article_ids
        """)
    query = query.bindparams(sql.bindparam("article_ids", expanding=True))

    with engine.begin() as connection:
        response = connection.execute(query, {
            "article_ids": article_ids
        }).fetchall()
        titles = {article_id: title for article_id, title in response}

    return titles
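The expanding=True flag is what lets a single :article_ids placeholder receive a Python list and be rendered as an IN list at execution time. A small self-contained sketch of the same mechanism against a toy SQLite table:

from sqlalchemy import bindparam, create_engine, text

engine = create_engine("sqlite://")
with engine.begin() as connection:
    connection.execute(text(
        "CREATE TABLE articles (article_id INTEGER PRIMARY KEY, title TEXT)"))
    connection.execute(
        text("INSERT INTO articles (article_id, title) VALUES (:i, :t)"),
        [{"i": 1, "t": "first"}, {"i": 2, "t": "second"}, {"i": 3, "t": "third"}])

    query = text("SELECT article_id, title FROM articles "
                 "WHERE article_id IN :article_ids")
    query = query.bindparams(bindparam("article_ids", expanding=True))
    rows = connection.execute(query, {"article_ids": [1, 3]}).fetchall()
    titles = {article_id: title for article_id, title in rows}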
Example #41
async def get_current_user(
        access_token: str = Depends(get_access_token),
        setting=Depends(get_setting),
        redis=Depends(get_redis(redisDBenum.session.value)),
):
    user_id = await redis.get(access_token)
    if user_id is None:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Could not validate credentials",
        )

    user_id = int(user_id)  # cast in int because redis stock int as bytes
    query = str(
        select([user.c.username,
                user.c.email]).where(user.c.id == bindparam("user_id")))
    result = await db.fetch_one(query=query, values={"user_id": user_id})
    if result is None:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Could not validate credentials",
        )
    await redis.expire(access_token, setting.session_ttl)
    username, email = result.values()
    return UserInDB(id=user_id, username=username, email=email)
Example #42
def find_sql_context(session_id, zodb_path):
    """Find the closest SQL tree node for a candidate path.
    The path has to be given as a list of path entries. The session
    timestamp is only used as part of a cache key for this method.
    The return value is the id of the SQL tree node. All consumed
    entries will be removed from the zodb_path list.
    XXX This uses the path ghost and is obsolete
    """
    # Pop all integer elements from the URL
    path = ""
    head = []
    while zodb_path:
        next = zodb_path.pop()
        if len(next) > 3:
            zodb_path.append(next)
            break

        try:
            path += "%03d" % int(next)
            head.append(next)
        except ValueError:
            zodb_path.append(next)
            break

    # Try and find a SQL tree node that matches our URL
    query = (Session.query(SurveyTreeItem).filter(
        SurveyTreeItem.session_id == session_id).filter(
            SurveyTreeItem.path == sql.bindparam("path")))
    while path:
        node = query.params(path=path).first()
        if node is not None:
            return node
        path = path[:-3]
        zodb_path.append(head.pop())
Example #43
    def get_table_oid(self, connection, table_name, schema=None, **kw):
        """Fetch the oid for schema.table_name.

        Several reflection methods require the table oid.  The idea for using
        this method is that it can be fetched one time and cached for
        subsequent calls.

        """
        table_oid = None
        if schema is not None:
            schema_where_clause = "schema = :schema"
        else:
            schema_where_clause = "1=1"
        query = ("""
                SELECT * FROM _V_TABLE
                WHERE (%s)
                AND tablename = :table_name 
            """ % schema_where_clause)
        # Since we're binding to unicode, table_name and schema_name must be
        # unicode.
        table_name = util.text_type(table_name)
        if schema is not None:
            schema = util.text_type(schema)
        s = sql.text(query).bindparams(
            sql.bindparam("table_name", type_=sqltypes.Unicode))
        s = s.columns(oid=sqltypes.Integer)
        if schema:
            s = s.bindparams(sql.bindparam("schema", type_=sqltypes.Unicode))
        c = connection.execute(s, table_name=table_name, schema=schema)
        table_oid = c.scalar()
        if table_oid is None:
            raise exc.NoSuchTableError(table_name)
        return table_oid
Example #44
    def _create_table_version(cls, engine, repository, version):
        """
        Creates the versioning table in a database.

        :raises: :exc:`DatabaseAlreadyControlledError`
        """
        # Create tables
        tname = repository.version_table
        meta = MetaData(engine)

        table = Table(
            tname,
            meta,
            Column('repository_id', String(250), primary_key=True),
            Column('repository_path', Text),
            Column('version', Integer),
        )

        # there can be multiple repositories/schemas in the same db
        if not table.exists():
            table.create()

        # test for existing repository_id
        s = table.select(table.c.repository_id == bindparam("repository_id"))
        result = engine.execute(s, repository_id=repository.id)
        if result.fetchone():
            raise exceptions.DatabaseAlreadyControlledError

        # Insert data
        engine.execute(table.insert().values(repository_id=repository.id,
                                             repository_path=repository.path,
                                             version=int(version)))
        return table
Example #45
 def build_tree_parents(layer3domain, version):
     tree, new_parents = Ipblock.build_tree_mem(layer3domain, version)
     if new_parents:
         logging.warning(
             '%d wrong parents found during tree rebuild in layer3domain %s version %d'
             % (len(new_parents), layer3domain.name, version))
         update = Ipblock.__table__.update() \
             .where(Ipblock.id == bindparam('_id')) \
             .values(parent_id=bindparam('_parent'))
         db.session.execute(update,
                            params=[
                                dict(_id=id, _parent=parent)
                                for id, parent in new_parents
                            ])
         db.session.expire_all()
     return tree
Example #46
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('client', sa.Column('namespace', sa.Unicode(length=250), nullable=True))
    op.create_unique_constraint('client_namespace_key', 'client', ['namespace'])
    ### end Alembic commands ###

    connection = op.get_bind()
    client = table('client',
        column('id', sa.Integer),
        column('website', sa.Unicode(250)),
        column('namespace', sa.Unicode(250)))
    results = connection.execute(select([client.c.id, client.c.website]))
    namespaces = []
    namespace_list = []
    for r in results:
        new_namespace = namespace = namespace_from_url(r[1])
        append_count = 0
        while new_namespace in namespace_list:
            append_count = append_count + 1
            new_namespace = "%s%s" % (namespace, append_count)
        namespaces.append({'clientid': r[0], 'namespace': new_namespace})
        namespace_list.append(new_namespace)

    if len(namespaces) > 0:
        updt_stmt = client.update().where(client.c.id == bindparam('clientid')).values(namespace=bindparam('namespace'))
        connection.execute(updt_stmt, namespaces)
Example #47
 def has_table(self, connection, tablename, schema=None):
     if schema is None:
         schema = self.default_schema_name
     if '.' not in schema:
         schema += '.'
     catalog, schema = schema.split('.', 1)
     result = connection.execute(
         text(
             "SELECT TABLE_NAME FROM DB..TABLES WHERE "
             "TABLE_CATALOG=:schemaname AND "
             "TABLE_NAME=:tablename",
             bindparams=[
                 bindparam("schemaname", catalog),
                 bindparam("tablename", tablename)
             ]))
     return result.scalar() is not None
Example #48
    def _get_final_molecules(self, optimization_ids=None):

        if optimization_ids is None:
            self._raise_missing_attribute("final_molecule",
                                          "List of optimizations ids")

        sql_statement = text("""
                select opt.id as opt_id, molecule.* from molecule
                join optimization_procedure as opt
                on molecule.id = opt.final_molecule
                where opt.id in :optimization_ids
        """)

        # bind and expand ids list
        sql_statement = sql_statement.bindparams(
            bindparam("optimization_ids", expanding=True))

        # column types:
        columns = inspect(MoleculeORM).columns
        sql_statement = sql_statement.columns(opt_id=Integer, *columns)
        query_result = self.execute_query(
            sql_statement, optimization_ids=list(optimization_ids))

        ret = {}
        for rec in query_result:
            self._remove_excluded_keys(rec)
            key = rec.pop("opt_id")
            rec = {k: v for k, v in rec.items() if v is not None}
            ret[key] = Molecule(**rec)

        return ret
Example #49
    def _create_table_version(cls, engine, repository, version):
        """
        Creates the versioning table in a database.

        :raises: :exc:`DatabaseAlreadyControlledError`
        """
        # Create tables
        tname = repository.version_table
        meta = MetaData(engine)

        table = Table(
            tname,
            meta,
            Column("repository_id", String(250), primary_key=True),
            Column("repository_path", Text),
            Column("version", Integer),
        )

        # there can be multiple repositories/schemas in the same db
        if not table.exists():
            table.create()

        # test for existing repository_id
        s = table.select(table.c.repository_id == bindparam("repository_id"))
        result = engine.execute(s, repository_id=repository.id)
        if result.fetchone():
            raise exceptions.DatabaseAlreadyControlledError

        # Insert data
        engine.execute(
            table.insert().values(repository_id=repository.id, repository_path=repository.path, version=int(version))
        )
        return table
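
The snippet above leans on bound-MetaData idioms (MetaData(engine), table.exists(), table.select(whereclause), engine.execute()) that were deprecated in SQLAlchemy 1.4 and removed in 2.0. A hedged sketch of the same create-if-missing / register-once flow in the newer style, using an in-memory SQLite database and illustrative table and repository values:

from sqlalchemy import (Column, Integer, MetaData, String, Table, Text,
                        bindparam, create_engine, select)

engine = create_engine("sqlite://")
meta = MetaData()
versions = Table("migrate_version", meta,
                 Column("repository_id", String(250), primary_key=True),
                 Column("repository_path", Text),
                 Column("version", Integer))

# checkfirst replaces the manual "create only if missing" test.
versions.create(engine, checkfirst=True)

check = select(versions).where(
    versions.c.repository_id == bindparam("repository_id"))

with engine.begin() as conn:
    if conn.execute(check, {"repository_id": "repo-1"}).first() is not None:
        raise RuntimeError("database already under version control")
    conn.execute(versions.insert(),
                 {"repository_id": "repo-1",
                  "repository_path": "/tmp/repo-1",
                  "version": 0})
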
Example #50
def find_sql_context(session_id, zodb_path):
    """Find the closest SQL tree node for a candidate path.

    The path has to be given as a list of path entries. The session
    timestamp is only used as part of a cache key for this method.

    The return value is the id of the SQL tree node. All consumed
    entries will be removed from the zodb_path list.
    """
    # Pop all integer elements from the URL
    path = ""
    head = []
    while zodb_path:
        next = zodb_path.pop()
        if len(next) > 3:
            zodb_path.append(next)
            break

        try:
            path += '%03d' % int(next)
            head.append(next)
        except ValueError:
            zodb_path.append(next)
            break

    # Try and find a SQL tree node that matches our URL
    query = Session.query(model.SurveyTreeItem.id).\
            filter(model.SurveyTreeItem.session_id == session_id).\
            filter(model.SurveyTreeItem.path == sql.bindparam('path'))
    while path:
        node = query.params(path=path).first()
        if node is not None:
            return node[0]
        path = path[:-3]
        zodb_path.append(head.pop())
Example #51
    def get_user_info(self, user, attrs):
        """Returns user info

        Args:
            user: the user object
            attrs: the pieces of data requested

        Returns:
            user object populated with attrs
        """
        user_id = self.get_user_id(user)
        if user_id is None:
            return user

        attrs = [attr for attr in attrs if not user.get(attr)]
        if attrs == []:
            return user

        fields = []
        for attr in attrs:
            fields.append(getattr(users.c, attr))

        _USER_INFO = select(fields, users.c.userid == bindparam('user_id'))
        res = safe_execute(self._engine, _USER_INFO,
                           user_id=user_id).fetchone()
        if res is None:
            return user
        for attr in attrs:
            try:
                user[attr] = getattr(res, attr)
            except AttributeError:
                user[attr] = None

        return user
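
The two-argument select(fields, whereclause) call above is the legacy form; since SQLAlchemy 1.4 the columns are passed positionally and the criterion moves into .where(). A minimal self-contained sketch of the same "fetch only the requested attributes" idea, assuming SQLAlchemy 1.4+ and an illustrative SQLite users table (the column names are made up):

from sqlalchemy import (Column, Integer, MetaData, String, Table, bindparam,
                        create_engine, select)

engine = create_engine("sqlite://")
meta = MetaData()
users = Table("users", meta,
              Column("userid", Integer, primary_key=True),
              Column("mail", String(120)),
              Column("sync_node", String(30)))
meta.create_all(engine)

requested = ["mail", "sync_node"]               # attrs still missing on the user
fields = [users.c[attr] for attr in requested]  # dynamic column list, as above

stmt = select(*fields).where(users.c.userid == bindparam("user_id"))

with engine.begin() as conn:
    conn.execute(users.insert(),
                 {"userid": 1, "mail": "a@example.com", "sync_node": "n1"})
    row = conn.execute(stmt, {"user_id": 1}).first()
    info = dict(zip(requested, row)) if row is not None else {}
    print(info)   # {'mail': 'a@example.com', 'sync_node': 'n1'}
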
Example #52
    def test_cloned_alias(self):
        entity = table('entity', column('id'), column('employer_id'),
                       column('name'))
        tag = table('tag', column('tag'), column('entity_id'))

        tags = select([
            tag.c.entity_id,
            func.array_agg(tag.c.tag).label('tags'),
        ]).group_by(tag.c.entity_id).cte('unaliased_tags')

        entity_tags = tags.alias(name='entity_tags')
        employer_tags = tags.alias(name='employer_tags')

        q = (select([entity.c.name]).select_from(
            entity.outerjoin(
                entity_tags, tags.c.entity_id == entity.c.id).outerjoin(
                    employer_tags,
                    tags.c.entity_id == entity.c.employer_id)).where(
                        entity_tags.c.tags.op('@>')(bindparam('tags'))).where(
                            employer_tags.c.tags.op('@>')(bindparam('tags'))))

        self.assert_compile(
            q, 'WITH unaliased_tags AS '
            '(SELECT tag.entity_id AS entity_id, array_agg(tag.tag) AS tags '
            'FROM tag GROUP BY tag.entity_id)'
            ' SELECT entity.name '
            'FROM entity '
            'LEFT OUTER JOIN unaliased_tags AS entity_tags ON '
            'unaliased_tags.entity_id = entity.id '
            'LEFT OUTER JOIN unaliased_tags AS employer_tags ON '
            'unaliased_tags.entity_id = entity.employer_id '
            'WHERE (entity_tags.tags @> :tags) AND '
            '(employer_tags.tags @> :tags)')

        cloned = q.params(tags=['tag1', 'tag2'])
        self.assert_compile(
            cloned, 'WITH unaliased_tags AS '
            '(SELECT tag.entity_id AS entity_id, array_agg(tag.tag) AS tags '
            'FROM tag GROUP BY tag.entity_id)'
            ' SELECT entity.name '
            'FROM entity '
            'LEFT OUTER JOIN unaliased_tags AS entity_tags ON '
            'unaliased_tags.entity_id = entity.id '
            'LEFT OUTER JOIN unaliased_tags AS employer_tags ON '
            'unaliased_tags.entity_id = entity.employer_id '
            'WHERE (entity_tags.tags @> :tags) AND '
            '(employer_tags.tags @> :tags)')
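
As the two assertions above show, .params() returns a copy of the statement with values attached to its named bindparam() placeholders while the original statement stays reusable. A compile-only sketch of that behaviour on a plain select, assuming SQLAlchemy 1.4+ (literal_binds is used only to make the two copies visibly different when printed):

from sqlalchemy import Integer, String, bindparam, select
from sqlalchemy.sql import column, table

entity = table("entity", column("id", Integer), column("name", String))

base = select(entity.c.name).where(entity.c.id == bindparam("entity_id"))

one = base.params(entity_id=1)   # independent copy with the value bound
two = base.params(entity_id=2)   # another copy; `base` itself is untouched

print(one.compile(compile_kwargs={"literal_binds": True}))
print(two.compile(compile_kwargs={"literal_binds": True}))
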
Example #53
    def invalidate_overdue_reservations(self):
        connection = self.__data_access_layer.connection
        rental_log = self.__data_access_layer.rental_log
        copy = self.__data_access_layer.copy

        connection = connection.execution_options(
            isolation_level="SERIALIZABLE")

        items = None
        with connection.begin():
            select_stmt = (select([
                copy.c.id,
                copy.c.library_item_id,
                rental_log.c.id,
            ]).select_from(copy.join(rental_log)).where(
                rental_log.c.book_status == BookStatus.RESERVED).where(
                    rental_log.c._reservation_end <= datetime.utcnow()))

            debug('Executing: \n{}'.format(str(select_stmt)))

            items = connection.execute(select_stmt).fetchall()

            if not items:
                debug('No reservations to cancel.')
                return

            bind_items = [{
                'copy_id': item[0],
                'rental_log_id': item[2]
            } for item in items]

            update_rental_log_stmt = (rental_log.update().where(
                rental_log.c.id == bindparam('rental_log_id')).values(
                    book_status=BookStatus.RETURNED))

            debug('Executing: \n{}'.format(str(update_rental_log_stmt)))
            connection.execute(update_rental_log_stmt, bind_items)

            update_copy_stmt = (copy.update().where(
                copy.c.id == bindparam('copy_id')).values(
                    available_status=BookStatus.RETURNED))

            debug('Executing: \n{}'.format(str(update_copy_stmt)))
            connection.execute(update_copy_stmt, bind_items)

        info("[{}] Cancelled reservation for library item id: {}".format(
            datetime.now(), ', '.join([str(item[1]) for item in items])))
Example #54
    def get_indexes(self, connection, tablename, schema=None, **kw):
        # using system catalogs, don't support index reflection
        # below MS 2005
        if self.server_version_info < MS_2005_VERSION:
            return []

        current_schema = schema or self.default_schema_name
        full_tname = "%s.%s" % (current_schema, tablename)

        rp = connection.execute(
            sql.text(
                "select ind.index_id, ind.is_unique, ind.name "
                "from sys.indexes as ind join sys.tables as tab on "
                "ind.object_id=tab.object_id "
                "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                "where tab.name = :tabname "
                "and sch.name=:schname "
                "and ind.is_primary_key=0",
                bindparams=[
                    sql.bindparam('tabname', tablename, sqltypes.Unicode),
                    sql.bindparam('schname', current_schema, sqltypes.Unicode)
                ]))
        indexes = {}
        for row in rp:
            indexes[row['index_id']] = {
                'name': row['name'],
                'unique': row['is_unique'] == 1,
                'column_names': []
            }
        rp = connection.execute(
            sql.text(
                "select ind_col.index_id, col.name from sys.columns as col "
                "join sys.index_columns as ind_col on "
                "ind_col.column_id=col.column_id "
                "join sys.tables as tab on tab.object_id=col.object_id "
                "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                "where tab.name=:tabname "
                "and sch.name=:schname",
                bindparams=[
                    sql.bindparam('tabname', tablename, sqltypes.Unicode),
                    sql.bindparam('schname', current_schema, sqltypes.Unicode)
                ]))
        for row in rp:
            if row['index_id'] in indexes:
                indexes[row['index_id']]['column_names'].append(row['name'])

        return indexes.values()
Example #55
    def query(self):
        # Working from the waitdata detailed_db base query
        inner_query = powa_getwaitdata_detailed_db(bindparam("server"))
        inner_query = inner_query.alias()
        c = inner_query.c
        ps = powa_statements

        columns = [
            c.srvid, c.queryid, ps.c.query, c.event_type, c.event,
            sum(c.count).label("counts")
        ]
        from_clause = inner_query.join(ps, (ps.c.queryid == c.queryid) &
                                       (ps.c.dbid == c.dbid))
        return (select(columns).select_from(from_clause).where(
            c.datname == bindparam("database")).group_by(
                c.srvid, c.queryid, ps.c.query, c.event_type,
                c.event).order_by(sum(c.count).desc()))
Example #56
def save_marginals(session, L, marginals):
    """Save the marginal probs. for the Candidates corresponding to the rows of L in the Candidate table."""
    # Prepare bulk UPDATE query
    q = Candidate.__table__.update().\
            where(Candidate.id == bindparam('cid')).\
            values(training_marginal=bindparam('tm'))

    # Prepare values
    update_vals = [{
        'cid': L.get_candidate(session, i).id,
        'tm': marginals[i]
    } for i in range(len(marginals))]

    # Execute update
    session.execute(q, update_vals)
    session.commit()
    print "Saved %s training marginals" % len(marginals)
Example #57
def test_sqlalchemy_core(n=100000):
    init_sqlalchemy()
    t0 = time.time()
    num_insert = int(n * (1 - UPDATE_PORTION))
    engine.execute(Customer.__table__.insert(), [{
        "name": 'NAME ' + str(i)
    } for i in range(num_insert)])
    update_stmt = Customer.__table__.update().\
                    where(Customer.__table__.c.name == bindparam('oldname')).\
                    values(name=bindparam('newname'))
    engine.execute(update_stmt,
                   [{
                       "oldname": 'NAME ' + str(random.randint(0, num_insert)),
                       "newname": 'UPDATED ' + str(i)
                   } for i in range(n - num_insert)])
    print("SQLAlchemy Core: Total time for " + str(n) + " records " +
          str(time.time() - t0) + " secs")
Example #58
 def _get_bindparams(self, **kwargs):
     bindparams = []
     for k, v in kwargs.items():
         bindparams.append(
             sql.bindparam(k,
                           unicode(self.denormalize_name(v)),
                           type_=sqltypes.Unicode))
     return bindparams
Example #59
 def _getTableInfo(self, table_name):
     t = self.information_schema.tables['information_schema.tables']
     s = sql.select()
     if self.metadata.name:
         s.append_whereclause(t.c.table_schema == self.metadata.name)
     s.append_whereclause(t.c.table_name == sql.bindparam("table_name"))
     results = s.execute(table_name=table_name)
     return dict(results[0].items())
Example #60
    def update_stmt():
        clause = sql.and_()

        for col in mapper._pks_by_table[table]:
            clause.clauses.append(col == sql.bindparam(col._label,
                                            type_=col.type))

        return table.update(clause)
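
The mapper helper above pairs every primary-key column with a bindparam() keyed on the column's label so the same UPDATE can be reused for any row. A hedged standalone sketch of that construction for an arbitrary Core Table, assuming SQLAlchemy 1.4+ (the orders table, the pk_ prefix and the new_status placeholder are illustrative; the statement is only printed):

from sqlalchemy import (Column, Integer, MetaData, String, Table, and_,
                        bindparam)

def pk_update_stmt(tbl, **values):
    """Build an UPDATE for ``tbl`` whose WHERE clause matches every
    primary-key column against a bindparam named 'pk_<column>'."""
    clause = and_(*[
        col == bindparam("pk_%s" % col.name, type_=col.type)
        for col in tbl.primary_key.columns
    ])
    return tbl.update().where(clause).values(**values)

meta = MetaData()
orders = Table("orders", meta,
               Column("region", String(10), primary_key=True),
               Column("order_id", Integer, primary_key=True),
               Column("status", String(20)))

stmt = pk_update_stmt(orders, status=bindparam("new_status"))
# Renders one :pk_<name> placeholder per key column in the WHERE clause.
print(stmt)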