Example #1
 def _search_for_join(mapper, table):
     """find a join between the given mapper's mapped table and the given table.
     will try the mapper's local table first for more specificity, then if not
     found will try the more general mapped table, which in the case of inheritance
     is a join."""
     try:
         return sql.join(mapper.local_table, table)
     except exceptions.ArgumentError:
         return sql.join(mapper.mapped_table, table)
Example #2
def upgrade():
    op.add_column('request',
            sa.Column('payout', sa.Numeric(precision=15, scale=2), index=True,
                nullable=True))

    bind = op.get_bind()
    absolute = select([abs_table.c.value.label('value'),
                       mod_table.c.request_id.label('request_id')])\
            .select_from(join(abs_table, mod_table,
                    mod_table.c.id == abs_table.c.id))\
            .where(mod_table.c.voided_user_id == None)\
            .alias()
    relative = select([rel_table.c.value.label('value'),
                       mod_table.c.request_id.label('request_id')])\
            .select_from(join(rel_table, mod_table,
                    mod_table.c.id == rel_table.c.id))\
            .where(mod_table.c.voided_user_id == None)\
            .alias()
    abs_sum = select([request.c.id.label('request_id'),
                      request.c.base_payout.label('base_payout'),
                      func.sum(absolute.c.value).label('sum')])\
            .select_from(outerjoin(request, absolute,
                    request.c.id == absolute.c.request_id))\
            .group_by(request.c.id)\
            .alias()
    rel_sum = select([request.c.id.label('request_id'),
                      func.sum(relative.c.value).label('sum')])\
            .select_from(outerjoin(request, relative,
                    request.c.id == relative.c.request_id))\
            .group_by(request.c.id)\
            .alias()
    total_sum = select([abs_sum.c.request_id.label('request_id'),
                        ((
                            abs_sum.c.base_payout +
                            case([(abs_sum.c.sum == None, Decimal(0))],
                                    else_=abs_sum.c.sum)) *
                         (
                            1 +
                            case([(rel_sum.c.sum == None, Decimal(0))],
                                    else_=rel_sum.c.sum))).label('payout')])\
            .select_from(join(abs_sum, rel_sum,
                    abs_sum.c.request_id == rel_sum.c.request_id))
    payouts = bind.execute(total_sum)
    for request_id, payout in payouts:
        up = update(request).where(request.c.id == request_id).values(
                payout=payout)
        bind.execute(up)
    op.alter_column('request', 'payout', nullable=False,
            existing_type=sa.Numeric(precision=15, scale=2))
Example #3
 def _determine_joins(self):
     if self.secondaryjoin is not None and self.secondary is None:
         raise exceptions.ArgumentError("Property '" + self.key + "' specified with secondary join condition but no secondary argument")
     # if join conditions were not specified, figure them out based on foreign keys
     try:
         if self.secondary is not None:
             if self.secondaryjoin is None:
                 self.secondaryjoin = sql.join(self.mapper.unjoined_table, self.secondary).onclause
             if self.primaryjoin is None:
                 self.primaryjoin = sql.join(self.parent.unjoined_table, self.secondary).onclause
         else:
             if self.primaryjoin is None:
                 self.primaryjoin = sql.join(self.parent.unjoined_table, self.target).onclause
     except exceptions.ArgumentError as e:
         raise exceptions.ArgumentError("Error determining primary and/or secondary join for relationship '%s'.  If the underlying error cannot be corrected, you should specify the 'primaryjoin' (and 'secondaryjoin', if there is an association table present) keyword arguments to the relation() function (or for backrefs, by specifying the backref using the backref() function with keyword arguments) to explicitly specify the join conditions.  Nested error is \"%s\"" % (str(self), str(e)))
Example #4
    def __init__(self, left, right, onclause=None, isouter=False):
        if _is_mapped_class(left) or _is_mapped_class(right):
            if hasattr(left, '_orm_mappers'):
                left_mapper = left._orm_mappers[1]
                adapt_from = left.right
            else:
                left_mapper = _class_to_mapper(left)
                if _is_aliased_class(left):
                    adapt_from = left.alias
                else:
                    adapt_from = None

            right_mapper = _class_to_mapper(right)
            self._orm_mappers = (left_mapper, right_mapper)
            
            if isinstance(onclause, basestring):
                prop = left_mapper.get_property(onclause)

                if _is_aliased_class(right):
                    adapt_to = right.alias
                else:
                    adapt_to = None

                pj, sj, source, dest, target_adapter = prop._create_joins(source_selectable=adapt_from, dest_selectable=adapt_to, source_polymorphic=True, dest_polymorphic=True)

                if sj:
                    left = sql.join(left, prop.secondary, onclause=pj)
                    onclause = sj
                else:
                    onclause = pj
        expression.Join.__init__(self, left, right, onclause, isouter)
Example #5
def messages_in_narrow_backend(request, user_profile,
                               msg_ids = REQ(validator=check_list(check_int)),
                               narrow = REQ(converter=narrow_parameter)):
    # type: (HttpRequest, UserProfile, List[int], List[Dict[str, Any]]) -> HttpResponse

    # Note that this function will only work on messages the user
    # actually received

    # TODO: We assume that the narrow is a search.  For now this works because
    # the browser only ever calls this function for searches, since it can't
    # apply that narrow operator itself.

    query = select([column("message_id"), column("subject"), column("rendered_content")],
                   and_(column("user_profile_id") == literal(user_profile.id),
                        column("message_id").in_(msg_ids)),
                   join(table("zerver_usermessage"), table("zerver_message"),
                        literal_column("zerver_usermessage.message_id") ==
                        literal_column("zerver_message.id")))

    builder = NarrowBuilder(user_profile, column("message_id"))
    for term in narrow:
        query = builder.add_term(query, term)

    sa_conn = get_sqlalchemy_connection()
    query_result = list(sa_conn.execute(query).fetchall())

    search_fields = dict()
    for row in query_result:
        (message_id, subject, rendered_content, content_matches, subject_matches) = row
        search_fields[message_id] = get_search_fields(rendered_content, subject,
                                                      content_matches, subject_matches)

    return json_success({"messages": search_fields})
Example #6
 def query(self):
     pq = qualstat_getstatdata(column("eval_type") == "f")
     base = alias(pq)
     query = (select([
         func.array_agg(column("queryid")).label("queryids"),
         "qualid",
         cast(column("quals"), JSONB).label('quals'),
         "occurences",
         "execution_count",
         func.array_agg(column("query")).label("queries"),
         "avg_filter",
         "filter_ratio"
     ]).select_from(
         join(base, powa_databases,
              onclause=(
                  powa_databases.c.oid == literal_column("dbid"))))
         .where(powa_databases.c.datname == bindparam("database"))
         .where(column("avg_filter") > 1000)
         .where(column("filter_ratio") > 0.3)
         .group_by(column("qualid"), column("execution_count"),
                   column("occurences"),
                   cast(column("quals"), JSONB),
                  column("avg_filter"), column("filter_ratio"))
         .order_by(column("occurences").desc())
         .limit(200))
     return query
Example #7
        def get_range_data(offset_, size_):
            tbl_main = alias(tbl_def, 't')
            join_condition = []

            pk_names_desc = [name+" DESC" for name in pk_names]
            sub_q = select(pk_cols).order_by(", ".join(pk_names_desc)).offset(offset_).limit(1).alias()

            for pk_name in pk_names:
                item = (tbl_main.c[pk_name] <= sub_q.c[pk_name])
                join_condition.append(item)

            if len(join_condition) > 1:
                j = join(tbl_main, sub_q, and_(*join_condition))
            else:
                j = join(tbl_main, sub_q, join_condition[0])

            return select([tbl_main]).select_from(j).order_by(", ".join(pk_names_desc)).limit(size_)
Example #8
def team_score_list():
    teams = Team.query.filter(Team.role.in_([Team.BLUE, Team.RED]))
    scoring_teams = []
    for team in teams:
        temp = db.session.query(
                functions.sum(CheckResult.success * ServiceCheck.value),
                functions.sum(ServiceCheck.value)) \
            .select_from(
                join(CheckResult,
                     join(ServiceCheck, Service, ServiceCheck.service_id == Service.id),
                     CheckResult.check_id == ServiceCheck.id))

        services = temp.filter(Service.team_id == team.id).first()

        earned = 0
        maximum = 0
        if services[0]:
            earned = services[0]
            maximum = services[1]

        flag_subquery = db.session.\
            query(functions.count(FlagDiscovery.id).label('solve_count'), Flag.value).\
            select_from(join(Flag, FlagDiscovery, Flag.id == FlagDiscovery.flag_id)).\
            filter(Flag.team_id == team.id).\
            group_by(Flag.id).\
            subquery('flag_subquery')
        flags = db.session \
            .query(functions.sum(flag_subquery.c.solve_count * flag_subquery.c.value)).\
            first()

        flags = flags[0] if flags[0] else 0

        injects = score_injects(team)

        team.scores = {
            'services_earned': earned,
            'services_maximum': maximum,
            'injects_earned': injects,
            'flags_lost': flags
        }

        scoring_teams.append(team)

    return render_scoring_page('scoring/index.html', teams=scoring_teams)
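Both queries above feed a nested join into select_from(); since a Join is itself a selectable, it can serve as either side of another join(). A minimal sketch with illustrative tables:

from sqlalchemy import MetaData, Table, Column, Integer, select
from sqlalchemy.sql import join

metadata = MetaData()
a = Table('a', metadata, Column('id', Integer, primary_key=True),
          Column('b_id', Integer))
b = Table('b', metadata, Column('id', Integer, primary_key=True),
          Column('c_id', Integer))
c = Table('c', metadata, Column('id', Integer, primary_key=True))

inner = join(b, c, b.c.c_id == c.c.id)       # B JOIN C
nested = join(a, inner, a.c.b_id == b.c.id)  # A JOIN (B JOIN C)
print(select([a]).select_from(nested))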
Example #9
 def test_mapify_with_table_object_join(self):
     t1 = Table('test_baph_mapify', self.orm.metadata, useexisting=True)
     t2 = Table('test_baph_mapify_join', self.orm.metadata, autoload=True,
                useexisting=True)
     tjoin = join(t1, t2)
     JoinObj = Mapify(self.orm, tjoin)(MapifiableClass)
     self.assertHasAttr(JoinObj, '__table__')
     self.assertHasAttr(JoinObj, 'id')
     self.assertHasAttr(JoinObj, 'string')
     self.assertHasAttr(JoinObj, 'number_with_decimal_point')
     self.assertHasAttr(JoinObj, 'other_string')
Example #10
    def _getPostInfo(self, ctx, row):
        post_info = {
                'type': row['post_type'],
                'slug': row['post_name'],
                'datetime': row['post_date'],
                'title': row['post_title'],
                'status': row['post_status'],
                'post_id': row['ID'],
                'post_guid': row['guid'],
                'content': row['post_content'],
                'excerpt': row['post_excerpt']}

        res = ctx.conn.execute(
                select([ctx.users])
                .where(ctx.users.c.ID == row['post_author'])).fetchone()
        if res:
            post_info['author'] = res['user_login']
        else:
            logger.warning("No author on %s" % row['post_name'])
            post_info['author'] = ''

        # TODO: This is super slow. Gotta cache this thing somehow.
        res = ctx.conn.execute(
                join(ctx.term_relationships,
                     join(ctx.term_taxonomy, ctx.terms))
                .select(ctx.term_relationships.c.object_id == row['ID']))
        categories = []
        for r in res:
            if r['taxonomy'] != 'category':
                logger.debug("Skipping taxonomy '%s' on: %s" %
                             (r['taxonomy'], row['post_name']))
                continue
            categories.append(r['slug'])
        post_info['categories'] = categories

        metadata = {}
        post_info['metadata'] = metadata

        return post_info
Example #11
def select_media(hashtag):

    j = join(TwitterMedia, TwitterHashtag, TwitterMedia.tweet_id == TwitterHashtag.tweet_id)
    q = select([TwitterMedia.media_url, TwitterMedia.tweet_id]).where(TwitterHashtag.hashtag == hashtag).where(TwitterMedia.enabled == True).select_from(j)

    log.debug(q)

    result = []

    for r in connection.execute(q).fetchall():
        result.append(r['media_url'])

    return result
Example #12
 def sflvault_service_put(self, authtok, service_id, data):
     # 'user_id' required in session.
     # TODO: verify I had access to the service previously.
     req = sql.join(servicegroups_table, usergroups_table,
                    ServiceGroup.group_id==UserGroup.group_id) \
              .join(users_table, User.id==UserGroup.user_id) \
              .select() \
              .where(User.id==self.sess['user_id']) \
              .where(ServiceGroup.service_id==service_id)
     res = list(meta.Session.execute(req))
     if not res:
         return vaultMsg(False, "You don't have access to that service.")
     else:
         return self.vault.service_put(service_id, data)
Example #13
    def __init__(self, left, right, onclause=None, 
                            isouter=False, join_to_left=True):
        adapt_from = None

        if hasattr(left, '_orm_mappers'):
            left_mapper = left._orm_mappers[1]
            if join_to_left:
                adapt_from = left.right
        else:
            left_mapper, left, left_is_aliased = _entity_info(left)
            if join_to_left and (left_is_aliased or not left_mapper):
                adapt_from = left

        right_mapper, right, right_is_aliased = _entity_info(right)
        if right_is_aliased:
            adapt_to = right
        else:
            adapt_to = None

        if left_mapper or right_mapper:
            self._orm_mappers = (left_mapper, right_mapper)

            if isinstance(onclause, basestring):
                prop = left_mapper.get_property(onclause)
            elif isinstance(onclause, attributes.QueryableAttribute):
                if adapt_from is None:
                    adapt_from = onclause.__clause_element__()
                prop = onclause.property
            elif isinstance(onclause, MapperProperty):
                prop = onclause
            else:
                prop = None

            if prop:
                pj, sj, source, dest, \
                secondary, target_adapter = prop._create_joins(
                                source_selectable=adapt_from,
                                dest_selectable=adapt_to,
                                source_polymorphic=True,
                                dest_polymorphic=True,
                                of_type=right_mapper)

                if sj is not None:
                    left = sql.join(left, secondary, pj, isouter)
                    onclause = sj
                else:
                    onclause = pj
                self._target_adapter = target_adapter

        expression.Join.__init__(self, left, right, onclause, isouter)
Example #14
 def by_is(self, query, operand, maybe_negate):
     if operand == 'private':
         query = query.select_from(join(query.froms[0], "zerver_recipient",
                                        column("recipient_id") ==
                                        literal_column("zerver_recipient.id")))
         cond = or_(column("type") == Recipient.PERSONAL,
                    column("type") == Recipient.HUDDLE)
         return query.where(maybe_negate(cond))
     elif operand == 'starred':
         cond = column("flags").op("&")(UserMessage.flags.starred.mask) != 0
         return query.where(maybe_negate(cond))
     elif operand == 'mentioned' or operand == 'alerted':
         cond = column("flags").op("&")(UserMessage.flags.mentioned.mask) != 0
         return query.where(maybe_negate(cond))
     raise BadNarrowOperator("unknown 'is' operand " + operand)
Example #16
    def get_album_query(self):
        from masterapp.config.schema import dbfields

        # Number of songs available on this album subquery
        havesongs = Session.query(Album.id.label('albumid'),
            func.count(Song.id).label('Album_havesongs'),
            func.sum(Song.length).label('Album_length')
        ).join(Album.songs, SongOwner).filter(SongOwner.uid == self.id)
        havesongs = havesongs.group_by(Album.id).subquery()

        query = Session.query(SongOwner.uid.label('Friend_id'), havesongs.c.Album_havesongs,
            havesongs.c.Album_length, User._name.label('Friend_name'),
            *dbfields['album'])
        joined = join(Album, havesongs, Album.id == havesongs.c.albumid)
        query = query.select_from(joined)
        query = query.join(Album.artist).reset_joinpoint()
        query = query.join(Album.songs, SongOwner, SongOwner.user).filter(SongOwner.uid == self.id)
        query = query.group_by(Album)
        return query
Example #17
 def _filter_by_event(self, first_name, last_name, year, from_dt, to_dt):
     """
     filter by event table
     :param first_name: batter first name
     :param last_name: batter last name
     :param year: season year
     :param from_dt: from date
     :param to_dt: to date
     :return: count
     """
     batter = self.get_player_data_one(year, first_name, last_name)
     return self.session.query(Event).select_from(join(Game, Event, Game.GAME_ID == Event.GAME_ID)).\
         filter(Event.BAT_ID == batter[rosters.c.PLAYER_ID.name]).\
         filter(
             Game.GAME_DT.between(
                 self.QUERY_DATE_FORMAT.format(year=year, dt=from_dt),
                 self.QUERY_DATE_FORMAT.format(year=year, dt=to_dt)
             )
         )
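The select_from(join(...)) idiom works at the ORM level too, as the method above shows. A hedged, self-contained sketch using simplified stand-ins for the Game and Event models (a single shared GAME_ID column; the in-memory SQLite engine is only for illustration):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, join

Base = declarative_base()

class Game(Base):
    __tablename__ = 'game'
    GAME_ID = Column(String, primary_key=True)

class Event(Base):
    __tablename__ = 'event'
    id = Column(Integer, primary_key=True)
    GAME_ID = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

q = session.query(Event).select_from(
    join(Game, Event, Game.GAME_ID == Event.GAME_ID))
print(q)  # SELECT event.* FROM game JOIN event ON ...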
Example #18
    async def get_method(self, id, method):
        if method == 'avg':
            from_date = self.get_int_param('fromDate')
            to_date = self.get_int_param('toDate')
            from_age = self.get_int_param('fromAge')
            to_age = self.get_int_param('toAge')
            gender = self.request.url.query.get('gender')
            if gender is not None and gender not in ['m', 'f']:
                raise web.HTTPBadRequest
            async with self.request.app['engine'].acquire() as conn:
                row = await conn.execute(
                    select([exists().where(Location.id == id)]))
                exists_ = await row.scalar()
                if not exists_:
                    raise web.HTTPNotFound

                query = select([func.avg(Visit.mark)
                                ]).where(Visit.location == id)
                if from_date is not None:
                    query = query.where(Visit.visited_at > from_date)
                if to_date is not None:
                    query = query.where(Visit.visited_at < to_date)
                if from_age is not None or to_age is not None or \
                   gender is not None:
                    query = query.select_from(
                        join(Visit, User, Visit.user == User.id))
                    # see https://stackoverflow.com/a/10258706/1336774
                    if from_age is not None:
                        query = query.where(
                            func.to_timestamp(User.birth_date) < func.now() -
                            text("'%d years'::interval" % from_age))
                    if to_age is not None:
                        query = query.where(
                            func.to_timestamp(User.birth_date) > func.now() -
                            text("'%d years'::interval" % to_age))
                    if gender is not None:
                        query = query.where(User.gender == gender)
                row = await conn.execute(query)
                avg = await row.scalar()
                if avg is None:
                    return web.json_response({'avg': 0.0})
                return web.json_response(
                    {'avg': float(round(avg, 5).normalize())})
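A pattern worth noting in the handler above: the join is attached to the select only when one of the age/gender filters actually needs the users table. A minimal hedged sketch of that conditional shape (tables and names are illustrative):

from sqlalchemy import MetaData, Table, Column, Integer, func, select
from sqlalchemy.sql import join

metadata = MetaData()
visits = Table('visits', metadata,
               Column('id', Integer, primary_key=True),
               Column('user_id', Integer),
               Column('mark', Integer))
users = Table('users', metadata,
              Column('id', Integer, primary_key=True),
              Column('age', Integer))

def avg_mark_query(max_age=None):
    query = select([func.avg(visits.c.mark)])
    if max_age is not None:  # join users only when the age filter is requested
        query = query.select_from(
            join(visits, users, visits.c.user_id == users.c.id))
        query = query.where(users.c.age < max_age)
    return query

print(avg_mark_query(30))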
Example #19
def outer_with_filter(query, alias, relation, filter_clause):
    left = relation.prop.parent
    left_info = inspection.inspect(left)
    right_info = inspection.inspect(alias)
    adapt_to = right_info.selectable
    adapt_from = left_info.selectable
    pj, sj, source, dest, \
        secondary, target_adapter = relation.prop._create_joins(
            source_selectable=adapt_from,
            dest_selectable=adapt_to,
            source_polymorphic=True,
            dest_polymorphic=True,
            of_type=right_info.mapper)
    if sj is not None:
        # note this is an inner join from secondary->right
        right = sql.join(secondary, alias, sj)
    else:
        right = alias
    onclause = and_(_add_alias(pj, relation, alias), filter_clause)
    return query.outerjoin(right, onclause)
Example #20
    def machine_list(self, customer_id=None):
        """Return a simple list of the machines"""
        sel = sql.join(customers_table, machines_table) \
                 .select(use_labels=True) \
                 .order_by(Customer.id)

        # Filter also..
        if customer_id:
            sel = sel.where(Customer.id==customer_id)

        lst = meta.Session.execute(sel)

        out = [{'id': x.machines_id, 'name': x.machines_name,
                  'fqdn': x.machines_fqdn, 'ip': x.machines_ip,
                  'location': x.machines_location, 'notes': x.machines_notes,
                  'customer_id': x.customers_id,
                  'customer_name': x.customers_name}
               for x in lst]

        return vaultMsg(True, "Here is the machines list", {'list': out})
Example #23
    def setUp(self):
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import mapper
        from sqlalchemy.sql import join

        from sqlalchemy import Table, Column, Integer, String, ForeignKey

        self.session = tws.transactional_session()
        Base = declarative_base(metadata=sa.MetaData('sqlite:///:memory:'))
        Base.query = self.session.query_property()

        users_table = Table('users', Base.metadata,
                Column('user_id', Integer, primary_key=True),
                Column('name', String(40)),
        )
        addresses_table = Table('addresses', Base.metadata,
                Column('address_id', Integer, primary_key=True),
                Column('user_id', Integer, ForeignKey('users.user_id')),
                Column('place', String(40)),
        )
        
        class DBTestCls1(object):
            pass

        j = join(users_table, addresses_table)

        mapper(DBTestCls1, j, properties={
            'user_id': [users_table.c.user_id, addresses_table.c.user_id]
        })
    
        Base.metadata.create_all()
        
        self.DBTestCls1 = DBTestCls1

        transaction.commit()

        testapi.setup()
Example #24
 def query(self):
     pq = qualstat_getstatdata(bindparam("server"),
                               column("eval_type") == "f")
     base = alias(pq)
     query = (
         select([
             # queryid in pg11+ is int64, so the value can exceed javascript's
             # Number.MAX_SAFE_INTEGER, which means that the value can get
             # truncated by the browser, leading to lookups for a nonexistent
             # queryid when processing this data.  To avoid that, simply cast
             # the value to text.
             func.array_agg(cast(column("queryid"),
                                 TEXT)).label("queryids"),
             column("qualid"),
             cast(column("quals"), JSONB).label('quals'),
             column("occurences"),
             column("execution_count"),
             func.array_agg(column("query")).label("queries"),
             column("avg_filter"),
             column("filter_ratio")
         ]).select_from(
             join(base,
                  powa_databases,
                  # composed with and_(); Python's `and` would silently
                  # drop one of the two conditions from the ON clause
                  onclause=and_(
                      powa_databases.c.oid == literal_column("dbid"),
                      powa_databases.c.srvid == literal_column("srvid")))).
         where(powa_databases.c.datname == bindparam("database")).where(
             powa_databases.c.srvid == bindparam("server")).where(
                 column("avg_filter") > 1000).where(
                     column("filter_ratio") > 0.3).group_by(
                         column("qualid"), column("execution_count"),
                         column("occurences"), cast(column("quals"), JSONB),
                         column("avg_filter"),
                         column("filter_ratio")).order_by(
                             column("occurences").desc()).limit(200))
     return query
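The onclause in this example needs SQLAlchemy's and_() (or the & operator): combining two clause expressions with Python's `and` evaluates bool() on the first one and keeps only a single operand, so part of the ON condition is silently lost. A small demonstration:

from sqlalchemy import and_
from sqlalchemy.sql import column

onclause = and_(column("a") == 1, column("b") == 2)
print(onclause)  # a = :a_1 AND b = :b_1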
Example #25
async def select_posts_with_filters(
    conn: SAConn,
    category=None,
    pet_type=None,
    location=None,
    post_id=None,
    direction=None,
):

    pet_type_alias = sa.alias(PetType, name="pet_type")
    location_alias = sa.alias(Location, name="location")
    j = join(Post, pet_type_alias,
             Post.pet_type_id == pet_type_alias.c.id).join(
                 location_alias, Post.location_id == location_alias.c.id)

    where_clauses = Post.visible == True

    columns_to_select = [
        Post.id,
        Post.title,
        pet_type_alias.c.name.label("pet_type_name"),
        pet_type_alias.c.emoji.label("pet_type_emoji"),
        location_alias.c.name.label("location_name"),
        location_alias.c.button_text.label("location_button_text"),
    ]

    if pet_type:
        where_clauses &= pet_type_alias.c.id == pet_type

    if location:
        where_clauses &= location_alias.c.id == location

    if category == "need_home":
        columns_to_select.append(Post.need_home)
        columns_to_select.append(Post.need_home_allow_other_location)

        where_clauses &= Post.need_home != None
        where_clauses &= Post.need_home != ""
        where_clauses &= Post.need_home_visible == True

    elif category == "need_temp":
        columns_to_select.append(Post.need_temp)
        where_clauses &= Post.need_temp != None
        where_clauses &= Post.need_temp != ""
        where_clauses &= Post.need_temp_visible == True

    elif category == "need_money":
        columns_to_select.append(Post.need_money)
        where_clauses &= Post.need_money != None
        where_clauses &= Post.need_money != ""
        where_clauses &= Post.need_money_visible == True

    elif category == "need_other":
        columns_to_select.append(Post.need_other)
        where_clauses &= Post.need_other != None
        where_clauses &= Post.need_other != ""
        where_clauses &= Post.need_other_visible == True
    else:
        columns_to_select += [
            Post.need_home,
            Post.need_temp,
            Post.need_money,
            Post.need_other,
        ]

    order_by = None

    if post_id:
        if direction:
            if direction == "<":
                where_clauses &= Post.id < post_id
                order_by = Post.id.desc()
            elif direction == ">":
                where_clauses &= Post.id > post_id
                order_by = Post.id.asc()
    else:
        order_by = Post.id.asc()

    q = select(columns_to_select).select_from(j).where(where_clauses)
    q = q.order_by(order_by)

    cursor = await conn.execute(q)
    return cursor
Example #26
def create_session(name, echo=False):
    """ load UCSC table definitions and create session """
    global initialized, meta, DBSNP
    if initialized:
        return

    uri = config.get_database_uri(name)

    log.info('connecting to UCSC at ' + uri)
    engine = sa.create_engine(uri, echo=echo)
    Session.configure(bind=engine)
    conn = engine.connect()
    # try:
    #     log.info('loading cached UCSC table definitions')
    #     table_file = os.path.join(os.path.split(__file__)[0], '.tables.pickle')
    #     meta = pickle.load(file(table_file))
    #     meta.bind = engine
    # except IOError:
    #     print 'WARNING: could not load table metadata, please call cache_tables()'
    meta = sa.MetaData()
    meta.bind = conn
    meta.reflect()

    # populate tables namespace
    for (name, table) in meta.tables.items():
        if 'wgEncode' not in name:
            setattr(tables, name, table)


    # KGXref is one to one with knownGene, so we can safely always use this join
    join_knowngene_xref = sql.join(tables.knownGene, tables.kgXref,
        tables.kgXref.c.kgID==tables.knownGene.c.name
    )

    join_knowncanonical = join_knowngene_xref.join(tables.knownCanonical, # this join means known gene only returns canonical transcripts
         tables.knownCanonical.c.transcript==tables.knownGene.c.name
    )

    # get the most recent snp table available
    snp_tables = sorted([x for x in meta.tables if re.match(r'snp\d\d\d$', x)])
    snp_table = snp_tables[-1]
    DBSNP = meta.tables[snp_table]
    model.Snp.table = DBSNP
    orm.mapper(model.Snp, DBSNP, primary_key=DBSNP.c.name, properties={
        'class_': DBSNP.c['class'],
    })
    if snp_table + 'Common' in meta.tables:
        commonSnp = meta.tables[snp_table + 'Common']
        model.CommonSnp.table = commonSnp
        orm.mapper(model.CommonSnp, commonSnp, primary_key=commonSnp.c.name, properties={
            'class_': commonSnp.c['class'],
        })

    # TODO: should remove this join?
    orm.mapper(model.KnownGene, join_knowngene_xref, primary_key=tables.knownGene.c.name,
        exclude_properties=[tables.knownCanonical.c.chrom]
    )
    orm.mapper(model.KnownCanonical, join_knowncanonical, primary_key=tables.knownGene.c.name,
        exclude_properties=[tables.knownCanonical.c.chrom, tables.knownCanonical.c.transcript]
    )

    orm.mapper(model.CcdsGene, tables.ccdsGene, primary_key=tables.ccdsGene.c.name)
    orm.mapper(model.RefGene, tables.refGene, primary_key=tables.refGene.c.name)

    orm.mapper(model.ChainSelf, tables.chainSelf, primary_key=tables.chainSelf.c.id)
    orm.mapper(model.ChainSelfLink, tables.chainSelfLink,
        primary_key=[tables.chainSelfLink.c.qStart, tables.chainSelfLink.c.chainId],
        properties={
            'chain': orm.relationship(model.ChainSelf, backref='links',
                primaryjoin=tables.chainSelfLink.c.chainId==tables.chainSelf.c.id,
                foreign_keys=[tables.chainSelfLink.c.chainId],
                lazy=False
            ),
        }
    )
    # monkeypatch session to enforce readonly
    session.flush = abort_ro

    initialized = True
    model.session = session
    return session
Example #27
        except InvalidReq as e:
            self.log_w('Service not found: %(service_id)s (%(error)s)', {
                "service_id": service_id,
                "error": str(e)
            })
            raise VaultError("Service not found: %s (%s)" %
                             (service_id, str(e)))
        # unused
        #me = query(User).get(self.myself_id)

        # We need no aliasing, because we'll only use `cryptgroupkey`,
        # `cryptsymkey` and `group_id` in there.
        req = sql.join(servicegroups_table, usergroups_table,
                       ServiceGroup.group_id==UserGroup.group_id) \
                 .join(users_table, User.id==UserGroup.user_id) \
                 .select(use_labels=True) \
                 .where(User.id==self.myself_id) \
                 .where(ServiceGroup.service_id==s.id) \
                 .order_by(ServiceGroup.group_id)

        # Deal with group if specified..
        if group_id:
            req = req.where(ServiceGroup.group_id == group_id)

        res = meta.Session.execute(req)

        # Take the first one
        uciphers = list(res)
        if not uciphers:
            ugcgk = ''
            sgcsk = ''
Example #28
    def get_items_from_query(self, library):
        """Gets identifiers and its related title, medium, and authors from the
        database.
        Keeps track of the current 'ISBN' identifier and current item object that
        is being processed. If the next ISBN being processed is new, the existing one
        gets added to the list of items. If the ISBN is the same, then we append
        the Author property since there are multiple contributors.
        """
        collectionList = []
        for c in library.collections:
            collectionList.append(c.id)

        LEFT_OUTER_JOIN = True
        i1 = aliased(Identifier)
        i2 = aliased(Identifier)
        roles = list(Contributor.AUTHOR_ROLES)
        # TODO: We should handle the Narrator role properly, by
        # setting the 'narrator' field in the NoveList API document.
        # roles.append(Contributor.NARRATOR_ROLE)

        isbnQuery = select(
            [i1.identifier, i1.type, i2.identifier,
            Edition.title, Edition.medium,
            Contribution.role, Contributor.sort_name],
        ).select_from(
            join(LicensePool, i1, i1.id==LicensePool.identifier_id)
            .join(Equivalency, i1.id==Equivalency.input_id, LEFT_OUTER_JOIN)
            .join(i2, Equivalency.output_id==i2.id, LEFT_OUTER_JOIN)
            .join(
                Edition,
                or_(Edition.primary_identifier_id==i1.id, Edition.primary_identifier_id==i2.id)
            )
            .join(Contribution, Edition.id==Contribution.edition_id)
            .join(Contributor, Contribution.contributor_id==Contributor.id)
        ).where(
            and_(
                LicensePool.collection_id.in_(collectionList),
                or_(i1.type=="ISBN", i2.type=="ISBN"),
                or_(Contribution.role.in_(roles))
            )
        ).order_by(i1.identifier, i2.identifier)

        result = self._db.execute(isbnQuery)

        items = []
        newItem = None
        existingItem = None
        currentIdentifier = None
        for item in result:
            if newItem:
                existingItem = newItem
            (currentIdentifier, existingItem, newItem, addItem) = (
                self.create_item_object(item, currentIdentifier, existingItem)
            )

            if addItem and existingItem:
                # The Role property isn't needed in the actual request.
                del existingItem['role']
                items.append(existingItem)

        # For the case when there's only one item in `result`
        if newItem:
            del newItem['role']
            items.append(newItem)

        return items
Example #29
    def setMapping(self):
        """
        Map the Metadata classes to the provider database schema
        """
        from sqlalchemy.orm import mapper, relationship, composite, synonym
        from sqlalchemy.sql import select, join

        schema = self.getSchema()

        alt_title_table = schema.tables['ALT_TITLE']
        mapper(AlternativeTitle, alt_title_table)
        
        citation_table = schema.tables['CITATION']
        mapper(ResourceLocator, citation_table, properties={
                'url': citation_table.c.ONLINERES,
                'name': citation_table.c.ONLINERESNAM,
                'description': citation_table.c.ONLINERESDESC,
                'function': citation_table.c.ONLINERESFUNC
                })
        resloc_res_table = schema.tables['RESLOC_RES']

        a_is_resolve_table = schema.tables['A_IS_RESOLVE']
        mapper(AdditionalInformation, citation_table)

        coupled_res_table = schema.tables['COUPLED_RES']
        mapper(CoupledResource, coupled_res_table)

        ctrlvocab_res_table = schema.tables['CTRLVOCAB_RES']
        mapper(Term, ctrlvocab_res_table)

        a_c_resolve_table = schema.tables['A_C_RESOLVE']
        mapper(AccessConstraint, a_c_resolve_table)

        access_use_table = schema.tables['ACCESS_USE']
        o_r_resolve_table = schema.tables['O_R_RESOLVE']
        a_u_resolve_table = schema.tables['A_U_RESOLVE']
        mapper(AccessUse, access_use_table)

        res_party_table = schema.tables['RES_PARTY']
        resparty_res_table = schema.tables['RESPARTY_RES']
        res_party_join = join(res_party_table, resparty_res_table)
        mapper(ResponsibleParty, res_party_join, properties={
            'RESPARTYID': [res_party_table.c.RESPARTYID, resparty_res_table.c.RESPARTYID],
            'ROLEID': resparty_res_table.c.ROLEID,
            'FIRSTNAME': res_party_table.c.FIRSTNAME,
            'SURNAME': res_party_table.c.SURNAME,
            'ORGID': res_party_table.c.ORGID,
            'position': res_party_table.c.POSITIONTITLE,
            'CONTACTID': res_party_table.c.CONTACTID
            })

        parent_id_table = schema.tables['PARENT_ID']
        
        metadata_table = schema.tables['METADATA']
        mapper(ParentId, metadata_table, properties={
                'id': metadata_table.c.IDENTIFIER,
                'codespace': metadata_table.c.CODESPACE
                })
        mapper(Metadata, metadata_table, properties={
            'METADATAID': metadata_table.c.METADATAID,
            'title': metadata_table.c.TITLE,
            'alt_titles': relationship(AlternativeTitle, order_by=AlternativeTitle.ALTTITLE),
            'abstract': metadata_table.c.ABSTRACT,
            'RESTYP_ID': metadata_table.c.RESTYP_ID,
            'resource_locators': relationship(ResourceLocator, secondary=resloc_res_table),
            'unique_id': composite(
                    UniqueId,
                    metadata_table.c.IDENTIFIER,
                    metadata_table.c.CODESPACE),
            'parent_id': relationship(ParentId,
                                      secondary=parent_id_table,
                                      primaryjoin=(metadata_table.c.METADATAID == parent_id_table.c.METADATAID),
                                      secondaryjoin=(metadata_table.c.METADATAID == parent_id_table.c.PARENTID),
                                      uselist=False),
            'coupled_resources': relationship(CoupledResource),
            'terms': relationship(Term),
            'SDSTYP_ID': metadata_table.c.SDSTYP_ID,
            'bounding_box': composite(
                    BoundingBox,
                    metadata_table.c.WEST,
                    metadata_table.c.SOUTH,
                    metadata_table.c.EAST,
                    metadata_table.c.NORTH),
            'vertical_extent': composite(
                    VerticalExtent,
                    metadata_table.c.VERTEXTMIN,
                    metadata_table.c.VERTEXTMAX,
                    metadata_table.c.VERTEXTREF_ID),
            'srs': metadata_table.c.SRSYS_ID,
            'temporal_reference': composite(
                    TemporalReference,
                    metadata_table.c.TEMPEXBGN,
                    metadata_table.c.TEMPEXEND,
                    metadata_table.c.PUBDATE,
                    metadata_table.c.REVDATE,
                    metadata_table.c.CREATED),
            'lineage': metadata_table.c.LINEAGE,
            'spatial_resolutions': composite(
                    SpatialResolution,
                    metadata_table.c.SPARES,
                    select(["NULL"])),
            'ADDITIONAL_INFO': relationship(AdditionalInformation, secondary=a_is_resolve_table),
            'ACCESS_CONSTRAINTS': relationship(AccessConstraint, order_by=AccessConstraint.ISOCODEID),
            'other_access_constraints': relationship(AccessUse, secondary=o_r_resolve_table),
            'use_limitations': relationship(AccessUse, secondary=a_u_resolve_table),
            'RESPARTY': relationship(ResponsibleParty),
            'date': metadata_table.c.MODDATE
        })
Example #30
def _check_capacity_exceeded(conn, allocs):
    """Checks to see if the supplied allocation records would result in any of
    the inventories involved having their capacity exceeded.

    Raises an InvalidAllocationCapacityExceeded exception if any inventory
    would be exhausted by the allocation. If no inventories would be exceeded
    by the allocation, the function returns a list of `ResourceProvider`
    objects that contain the generation at the time of the check.

    :param conn: SQLalchemy Connection object to use
    :param allocs: List of `Allocation` objects to check
    """
    # The SQL generated below looks like this:
    # SELECT
    #   rp.id,
    #   rp.uuid,
    #   rp.generation,
    #   inv.resource_class_id,
    #   inv.total,
    #   inv.reserved,
    #   inv.allocation_ratio,
    #   allocs.used
    # FROM resource_providers AS rp
    # JOIN inventories AS inv
    # ON rp.id = inv.resource_provider_id
    # LEFT JOIN (
    #    SELECT resource_provider_id, resource_class_id, SUM(used) AS used
    #    FROM allocations
    #    WHERE resource_class_id IN ($RESOURCE_CLASSES)
    #    GROUP BY resource_provider_id, resource_class_id
    # ) AS allocs
    # ON inv.resource_provider_id = allocs.resource_provider_id
    # AND inv.resource_class_id = allocs.resource_class_id
    # WHERE rp.uuid IN ($RESOURCE_PROVIDERS)
    # AND inv.resource_class_id IN ($RESOURCE_CLASSES)
    #
    # We then take the results of the above and determine if any of the
    # inventory will have its capacity exceeded.
    res_classes = set([fields.ResourceClass.index(a.resource_class)
                       for a in allocs])
    provider_uuids = set([a.resource_provider.uuid for a in allocs])

    usage = sa.select([_ALLOC_TBL.c.resource_provider_id,
                       _ALLOC_TBL.c.consumer_id,
                       _ALLOC_TBL.c.resource_class_id,
                       sql.func.sum(_ALLOC_TBL.c.used).label('used')])
    usage = usage.where(_ALLOC_TBL.c.resource_class_id.in_(res_classes))
    usage = usage.group_by(_ALLOC_TBL.c.resource_provider_id,
                           _ALLOC_TBL.c.resource_class_id)
    usage = sa.alias(usage, name='usage')

    inv_join = sql.join(_RP_TBL, _INV_TBL,
            sql.and_(_RP_TBL.c.id == _INV_TBL.c.resource_provider_id,
                     _INV_TBL.c.resource_class_id.in_(res_classes)))
    primary_join = sql.outerjoin(inv_join, usage,
            _INV_TBL.c.resource_provider_id == usage.c.resource_provider_id)

    cols_in_output = [
        _RP_TBL.c.id.label('resource_provider_id'),
        _RP_TBL.c.uuid,
        _RP_TBL.c.generation,
        _INV_TBL.c.resource_class_id,
        _INV_TBL.c.total,
        _INV_TBL.c.reserved,
        _INV_TBL.c.allocation_ratio,
        usage.c.used,
    ]

    sel = sa.select(cols_in_output).select_from(primary_join)
    sel = sel.where(
            sa.and_(_RP_TBL.c.uuid.in_(provider_uuids),
                    _INV_TBL.c.resource_class_id.in_(res_classes)))
    records = conn.execute(sel)
    # Create a map keyed by (rp_uuid, res_class) for the records in the DB
    usage_map = {}
    provs_with_inv = set()
    for record in records:
        usage_map[(record['uuid'], record['resource_class_id'])] = record
        provs_with_inv.add(record["uuid"])
    # Ensure that all providers have existing inventory
    missing_provs = provider_uuids - provs_with_inv
    if missing_provs:
        raise exception.InvalidInventory(resource_class=str(res_classes),
                resource_provider=missing_provs)

    res_providers = {}
    for alloc in allocs:
        res_class = fields.ResourceClass.index(alloc.resource_class)
        rp_uuid = alloc.resource_provider.uuid
        key = (rp_uuid, res_class)
        usage = usage_map[key]
        amount_needed = alloc.used
        allocation_ratio = usage['allocation_ratio']
        # usage["used"] can be returned as None
        used = usage['used'] or 0
        capacity = (usage['total'] - usage['reserved']) * allocation_ratio
        if capacity < (used + amount_needed):
            raise exception.InvalidAllocationCapacityExceeded(
                resource_class=fields.ResourceClass.from_index(res_class),
                resource_provider=rp_uuid)
        if rp_uuid not in res_providers:
            rp = ResourceProvider(id=usage['resource_provider_id'],
                                  uuid=rp_uuid,
                                  generation=usage['generation'])
            res_providers[rp_uuid] = rp
    return list(res_providers.values())
Example #31
    def do_init(self, key, parent):
        import sqlalchemy.orm
        if isinstance(self.argument, type):
            self.mapper = mapper.class_mapper(self.argument,
                                              compile=False)._do_compile()
        else:
            self.mapper = self.argument._do_compile()

        self.mapper = self.mapper.get_select_mapper()._do_compile()

        if self.association is not None:
            if isinstance(self.association, type):
                self.association = mapper.class_mapper(self.association,
                                                       compile=False)

        self.target = self.mapper.mapped_table

        if self.secondaryjoin is not None and self.secondary is None:
            raise exceptions.ArgumentError(
                "Property '" + self.key +
                "' specified with secondary join condition but no secondary argument"
            )
        # if join conditions were not specified, figure them out based on foreign keys
        if self.secondary is not None:
            if self.secondaryjoin is None:
                self.secondaryjoin = sql.join(self.mapper.unjoined_table,
                                              self.secondary).onclause
            if self.primaryjoin is None:
                self.primaryjoin = sql.join(parent.unjoined_table,
                                            self.secondary).onclause
        else:
            if self.primaryjoin is None:
                self.primaryjoin = sql.join(parent.unjoined_table,
                                            self.target).onclause

        # if the foreign key wasn't specified and there's no association table, try to figure
        # out who is dependent on whom. we don't need all the foreign keys represented in the join,
        # just one of them.
        if not len(self.foreignkey) and self.secondaryjoin is None:
            # else we usually will have a one-to-many where the secondary depends on the primary,
            # but it's possible that it's reversed
            self._find_dependent()

        # if we are re-initializing, as in a copy made for an inheriting
        # mapper, don't re-evaluate the direction.
        if self.direction is None:
            self.direction = self._get_direction()

        if self.uselist is None and self.direction == sync.MANYTOONE:
            self.uselist = False

        if self.uselist is None:
            self.uselist = True

        self._compile_synchronizers()
        self._dependency_processor = dependency.create_dependency_processor(
            self.key,
            self.syncrules,
            self.cascade,
            secondary=self.secondary,
            association=self.association,
            is_backref=self.is_backref,
            post_update=self.post_update)

        # primary property handler, set up class attributes
        if self.is_primary():
            # if a backref name is defined, set up an extension to populate
            # attributes in the other direction
            if self.backref is not None:
                self.attributeext = self.backref.get_extension()

            # set our class attribute
            self._set_class_attribute(parent.class_, key)

            if self.backref is not None:
                self.backref.compile(self)
        elif not sessionlib.attribute_manager.is_class_managed(
                parent.class_, key):
            raise exceptions.ArgumentError(
                "Attempting to assign a new relation '%s' to a non-primary mapper on class '%s'.  New relations can only be added to the primary mapper, i.e. the very first mapper created for class '%s' "
                % (key, parent.class_.__name__, parent.class_.__name__))

        self.do_init_subclass(key, parent)
Example #32
def add_owners(engine: Engine, metadata: Metadata) -> None:
    """
    Tag every object according to its owner:

      INSERT INTO tagged_object (tag_id, object_id, object_type)
      SELECT
        tag.id AS tag_id,
        slices.id AS object_id,
        'chart' AS object_type
      FROM slices
      JOIN tag
      ON tag.name = CONCAT('owner:', slices.created_by_fk)
      LEFT OUTER JOIN tagged_object
        ON tagged_object.tag_id = tag.id
        AND tagged_object.object_id = slices.id
        AND tagged_object.object_type = 'chart'
      WHERE tagged_object.tag_id IS NULL;

      SELECT
        tag.id AS tag_id,
        dashboards.id AS object_id,
        'dashboard' AS object_type
      FROM dashboards
      JOIN tag
      ON tag.name = CONCAT('owner:', dashboards.created_by_fk)
      LEFT OUTER JOIN tagged_object
        ON tagged_object.tag_id = tag.id
        AND tagged_object.object_id = dashboards.id
        AND tagged_object.object_type = 'dashboard'
      WHERE tagged_object.tag_id IS NULL;

      SELECT
        tag.id AS tag_id,
        saved_query.id AS object_id,
        'query' AS object_type
      FROM saved_query
      JOIN tag
      ON tag.name = CONCAT('owner:', saved_query.created_by_fk)
      LEFT OUTER JOIN tagged_object
        ON tagged_object.tag_id = tag.id
        AND tagged_object.object_id = saved_query.id
        AND tagged_object.object_type = 'query'
      WHERE tagged_object.tag_id IS NULL;

    """

    tag = metadata.tables["tag"]
    tagged_object = metadata.tables["tagged_object"]
    users = metadata.tables["ab_user"]
    slices = metadata.tables["slices"]
    dashboards = metadata.tables["dashboards"]
    saved_query = metadata.tables["saved_query"]
    columns = ["tag_id", "object_id", "object_type"]

    # create a custom tag for each user
    ids = select([users.c.id])
    insert = tag.insert()
    for (id_, ) in engine.execute(ids):
        try:
            engine.execute(insert, name=f"owner:{id_}", type=TagTypes.owner)
        except IntegrityError:
            pass  # already exists

    charts = (select([
        tag.c.id.label("tag_id"),
        slices.c.id.label("object_id"),
        literal(ObjectTypes.chart.name).label("object_type"),
    ]).select_from(
        join(
            join(
                slices,
                tag,
                tag.c.name == functions.concat("owner:",
                                               slices.c.created_by_fk),
            ),
            tagged_object,
            and_(
                tagged_object.c.tag_id == tag.c.id,
                tagged_object.c.object_id == slices.c.id,
                tagged_object.c.object_type == "chart",
            ),
            isouter=True,
            full=False,
        )).where(tagged_object.c.tag_id.is_(None)))
    query = tagged_object.insert().from_select(columns, charts)
    engine.execute(query)

    dashboards = (select([
        tag.c.id.label("tag_id"),
        dashboards.c.id.label("object_id"),
        literal(ObjectTypes.dashboard.name).label("object_type"),
    ]).select_from(
        join(
            join(
                dashboards,
                tag,
                tag.c.name == functions.concat("owner:",
                                               dashboards.c.created_by_fk),
            ),
            tagged_object,
            and_(
                tagged_object.c.tag_id == tag.c.id,
                tagged_object.c.object_id == dashboards.c.id,
                tagged_object.c.object_type == "dashboard",
            ),
            isouter=True,
            full=False,
        )).where(tagged_object.c.tag_id.is_(None)))
    query = tagged_object.insert().from_select(columns, dashboards)
    engine.execute(query)

    saved_queries = (select([
        tag.c.id.label("tag_id"),
        saved_query.c.id.label("object_id"),
        literal(ObjectTypes.query.name).label("object_type"),
    ]).select_from(
        join(
            join(
                saved_query,
                tag,
                tag.c.name == functions.concat("owner:",
                                               saved_query.c.created_by_fk),
            ),
            tagged_object,
            and_(
                tagged_object.c.tag_id == tag.c.id,
                tagged_object.c.object_id == saved_query.c.id,
                tagged_object.c.object_type == "query",
            ),
            isouter=True,
            full=False,
        )).where(tagged_object.c.tag_id.is_(None)))
    query = tagged_object.insert().from_select(columns, saved_queries)
    engine.execute(query)
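All three INSERTs above rely on the same anti-join shape: LEFT OUTER JOIN the target table and keep only rows where its key IS NULL, which makes the insert-from-select idempotent. A minimal hedged sketch with illustrative src/dst tables:

from sqlalchemy import MetaData, Table, Column, Integer, select
from sqlalchemy.sql import join

metadata = MetaData()
src = Table('src', metadata, Column('id', Integer, primary_key=True))
dst = Table('dst', metadata,
            Column('id', Integer, primary_key=True),
            Column('src_id', Integer))

# rows of src that have no dst row yet
missing = select([src.c.id]).select_from(
    join(src, dst, dst.c.src_id == src.c.id, isouter=True)
).where(dst.c.id.is_(None))

print(dst.insert().from_select(['src_id'], missing))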
Example #33
 def _join(self, table: Table) -> Join:
     return join(
         Files, table, 
         (Files.c.start_time >= table.c.valid_from) & \
             (Files.c.stop_time <= table.c.valid_to)
     )
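Because join() accepts an arbitrary boolean expression as its ON clause, range (non-equi) joins like the one above need no special support. A hedged sketch with illustrative tables standing in for Files and a calibration table:

from sqlalchemy import MetaData, Table, Column, Integer, select
from sqlalchemy.sql import join

metadata = MetaData()
files = Table('files', metadata,
              Column('id', Integer, primary_key=True),
              Column('start_time', Integer),
              Column('stop_time', Integer))
calib = Table('calib', metadata,
              Column('id', Integer, primary_key=True),
              Column('valid_from', Integer),
              Column('valid_to', Integer))

j = join(files, calib,
         (files.c.start_time >= calib.c.valid_from) &
         (files.c.stop_time <= calib.c.valid_to))
print(select([files]).select_from(j))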
Example #34
    def __init__(self,
                 class_,
                 table,
                 primarytable=None,
                 properties=None,
                 primary_key=None,
                 is_primary=False,
                 inherits=None,
                 inherit_condition=None,
                 extension=None,
                 order_by=False,
                 allow_column_override=False,
                 **kwargs):

        if primarytable is not None:
            sys.stderr.write(
                "'primarytable' argument to mapper is deprecated\n")

        if extension is None:
            self.extension = MapperExtension()
        else:
            self.extension = extension
        self.class_ = class_
        self.is_primary = is_primary
        self.order_by = order_by
        self._options = {}

        if not issubclass(class_, object):
            raise TypeError("Class '%s' is not a new-style class" %
                            class_.__name__)

        if isinstance(table, sql.Select):
            # some dbs, notably postgres, don't want to select from a select
            # without an alias.  also, if we made our own alias internally,
            # the configured properties on the mapper would not be matched
            # against the alias we make; there are workarounds, but it starts
            # to get really crazy (the generated SQL is crazy enough) so just
            # require an alias
            raise TypeError(
                "Mapping against a Select object requires that it has a name.  Use an alias to give it a name, i.e. s = select(...).alias('myselect')"
            )
        else:
            self.table = table

        if inherits is not None:
            self.primarytable = inherits.primarytable
            # inherit_condition is optional since the join can figure it out
            self.table = sql.join(inherits.table, table, inherit_condition)
        else:
            self.primarytable = self.table

        # locate all tables contained within the "table" passed in, which
        # may be a join or other construct
        tf = TableFinder()
        self.table.accept_visitor(tf)
        self.tables = tf.tables

        # determine primary key columns, either passed in, or get them from our set of tables
        self.pks_by_table = {}
        if primary_key is not None:
            for k in primary_key:
                self.pks_by_table.setdefault(
                    k.table, util.HashSet(ordered=True)).append(k)
                if k.table != self.table:
                    # associate pk cols from subtables to the "main" table
                    self.pks_by_table.setdefault(
                        self.table, util.HashSet(ordered=True)).append(k)
        else:
            for t in self.tables + [self.table]:
                try:
                    l = self.pks_by_table[t]
                except KeyError:
                    l = self.pks_by_table.setdefault(
                        t, util.HashSet(ordered=True))
                if not len(t.primary_key):
                    raise ValueError(
                        "Table " + t.name +
                        " has no primary key columns. Specify primary_key argument to mapper."
                    )
                for k in t.primary_key:
                    l.append(k)

        # make table columns addressable via the mapper
        self.columns = util.OrderedProperties()
        self.c = self.columns

        # object attribute names mapped to MapperProperty objects
        self.props = {}

        # table columns mapped to lists of MapperProperty objects
        # using a list allows a single column to be defined as
        # populating multiple object attributes
        self.columntoproperty = {}

        # load custom properties
        if properties is not None:
            for key, prop in properties.iteritems():
                if sql.is_column(prop):
                    try:
                        prop = self.table._get_col_by_original(prop)
                    except KeyError:
                        raise ValueError(
                            "Column '%s' is not represented in mapper's table"
                            % prop._label)
                    self.columns[key] = prop
                    prop = ColumnProperty(prop)
                elif isinstance(prop, list) and sql.is_column(prop[0]):
                    try:
                        prop = [
                            self.table._get_col_by_original(p) for p in prop
                        ]
                    except KeyError, e:
                        raise ValueError(
                            "Column '%s' is not represented in mapper's table"
                            % e.args[0])
                    self.columns[key] = prop[0]
                    prop = ColumnProperty(*prop)
                self.props[key] = prop
                if isinstance(prop, ColumnProperty):
                    for col in prop.columns:
                        proplist = self.columntoproperty.setdefault(
                            col.original, [])
                        proplist.append(prop)
Example #35
 def listHourEntries(self, student_id, semester_id):
     return select([self.hours_entry, self.organization.c.name], 
             and_(self.hours_entry.c.student == student_id, 
                  self.hours_entry.c.semester == semester_id), 
             [join(self.hours_entry, self.organization)])
Example #36
def campaign_stats(campaign_id):
    campaign = Campaign.query.filter_by(id=campaign_id).first_or_404()

    # number of sessions started in campaign
    # total count and average queue_delay
    sessions_started, queue_avg_timedelta = db.session.query(
        func.count(Session.id).label('count'),
        func.avg(Session.queue_delay).label('queue_avg')).filter_by(
            campaign_id=campaign.id).all()[0]
    if isinstance(queue_avg_timedelta, timedelta):
        queue_avg_seconds = queue_avg_timedelta.total_seconds()
    else:
        queue_avg_seconds = ''

    # number of sessions with at least one completed call in campaign
    sessions_completed = (db.session.query(func.count(
        Session.id.distinct())).select_from(join(Session, Call)).filter(
            Call.campaign_id == campaign.id,
            Call.status == 'completed')).scalar()

    # number of calls completed in campaign
    calls_completed = db.session.query(Call.timestamp, Call.id).filter_by(
        campaign_id=campaign.id, status='completed')

    # get completed calls per session in campaign
    calls_per_session = db.session.query(
        func.count(Call.id.distinct()).label('call_count'), ).filter(
            Call.campaign_id == campaign.id, Call.status == 'completed',
            Call.session_id != None).group_by(Call.session_id)
    calls_per_session_avg = db.session.query(
        func.avg(calls_per_session.subquery().columns.call_count), )
    # use this one weird trick to calculate the median
    # https://stackoverflow.com/a/27826044
    calls_per_session_med = db.session.query(
        func.percentile_cont(0.5).within_group(
            calls_per_session.subquery().columns.call_count.desc()))
    # calls_session_list = [int(n[0]) for n in calls_session_grouped.all()]
    calls_per_session = {
        'avg': '%.2f' % (calls_per_session_avg.scalar() or 0),
        'med': calls_per_session_med.scalar() or '?'
    }

    data = {
        'id': campaign.id,
        'name': campaign.name,
        'sessions_started': sessions_started,
        'queue_avg_seconds': queue_avg_seconds,
        'sessions_completed': sessions_completed,
        'calls_per_session': calls_per_session,
        'calls_completed': calls_completed.count()
    }

    if data['calls_completed']:
        first_call_completed = calls_completed.first()
        last_call_completed = calls_completed.order_by(
            Call.timestamp.desc()).first()
        data.update({
            'date_start':
            datetime.strftime(first_call_completed[0], '%Y-%m-%d'),
            'date_end':
            datetime.strftime(last_call_completed[0] + timedelta(days=1),
                              '%Y-%m-%d'),
        })

    return jsonify(data)
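
The percentile_cont construct above computes the median server-side (PostgreSQL). A condensed sketch of just that piece, with hypothetical table and column names:

from sqlalchemy import Column, Integer, MetaData, Table, func, select

metadata = MetaData()
calls = Table("call", metadata,
              Column("id", Integer, primary_key=True),
              Column("session_id", Integer))

# Completed-call counts per session, as a derived table.
per_session = (select([func.count(calls.c.id).label("call_count")])
               .group_by(calls.c.session_id)
               .alias())

# percentile_cont(0.5) WITHIN GROUP (ORDER BY call_count) is the
# continuous median of the grouped counts.
median = select([
    func.percentile_cont(0.5).within_group(per_session.c.call_count)
])
print(median)
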
Example #37
    def build_agp_view(self):
        study_ids = self._get_study_ids()
        gene_set_ids = self._get_gene_set_ids()
        current_join = None
        select_cols = [self.gene_symbols.c.symbol_name]

        for category in self.configuration["gene_sets"]:
            category_id = category["category"]
            table_alias = aliased(
                self.categories_ranks,
                f"{category_id}_rank"
            )
            left = current_join
            if left is None:
                left = self.gene_symbols

            current_join = join(
                left, table_alias,
                and_(
                    self.gene_symbols.c.id == table_alias.c.symbol_id,
                    table_alias.c.category_id == category_id
                )
            )

            select_cols.append(
                table_alias.c.count.label(f"{category_id}_rank")
            )

            for gs in category["sets"]:
                set_id = gs["set_id"]
                collection_id = gs["collection_id"]
                full_set_id = f"{collection_id}_{set_id}"
                set_alias = full_set_id
                table_alias = aliased(
                    self.gene_symbol_sets,
                    set_alias
                )
                left = current_join
                if left is None:
                    left = self.gene_symbols

                current_join = join(
                    left, table_alias,
                    and_(
                        self.gene_symbols.c.id == table_alias.c.symbol_id,
                        table_alias.c.set_id == gene_set_ids[full_set_id]
                    ),
                    isouter=True
                )

                select_cols.append(table_alias.c.present.label(set_alias))

        for category in self.configuration["genomic_scores"]:
            category_name = category["category"]
            for score in category["scores"]:
                score_name = score["score_name"]
                score_alias = f"{category_name}_{score_name}"
                table_alias = aliased(
                    self.genomic_scores,
                    score_alias
                )
                left = current_join
                if left is None:
                    left = self.gene_symbols

                current_join = join(
                    left, table_alias,
                    and_(
                        self.gene_symbols.c.id == table_alias.c.symbol_id,
                        table_alias.c.score_category == category_name,
                        table_alias.c.score_name == score_name
                    )
                )

                select_cols.append(
                    table_alias.c.score_value.label(score_alias)
                )

        for dataset_id, dataset in self.configuration["datasets"].items():
            config_section = self.configuration["datasets"][dataset_id]
            db_study_id = study_ids[dataset_id]
            for person_set in config_section["person_sets"]:
                set_name = person_set["set_name"]
                for stat in config_section["statistics"]:
                    stat_id = stat["id"]
                    count_alias = f"{dataset_id}_{set_name}_{stat_id}"
                    rate_alias = f"{count_alias}_rate"
                    table_alias = aliased(
                        self.variant_counts,
                        count_alias
                    )
                    left = current_join
                    if left is None:
                        left = self.gene_symbols

                    current_join = join(
                        left, table_alias,
                        and_(
                            self.gene_symbols.c.id == table_alias.c.symbol_id,
                            table_alias.c.study_id == db_study_id,
                            table_alias.c.people_group == set_name,
                            table_alias.c.statistic_id == stat_id
                        )
                    )
                    select_cols.extend([
                        table_alias.c.count.label(count_alias),
                        table_alias.c.rate.label(rate_alias)
                    ])

        view_query = select(select_cols).select_from(current_join)

        with self.engine.connect() as connection:
            connection.execute("DROP VIEW IF EXISTS agp_view")
            view_create = CreateView("agp_view", view_query)
            connection.execute(view_create)

        self._agp_view = Table("agp_view", self.metadata, autoload=True)
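
build_agp_view grows one wide join incrementally: every iteration aliases a table and wraps the running join in another join() against that alias, collecting one labeled output column per alias. The skeleton of that pattern, reduced to a runnable sketch with made-up category names:

from sqlalchemy import (Column, Integer, MetaData, String, Table, and_,
                        join, select)

metadata = MetaData()
symbols = Table("symbols", metadata,
                Column("id", Integer, primary_key=True),
                Column("name", String))
ranks = Table("ranks", metadata,
              Column("symbol_id", Integer),
              Column("category", String),
              Column("count", Integer))

current_join = None
select_cols = [symbols.c.name]
for category in ["autism", "epilepsy"]:  # hypothetical categories
    rank = ranks.alias(f"{category}_rank")
    left = current_join if current_join is not None else symbols
    # Wrap the join built so far in one more join against a fresh alias.
    current_join = join(left, rank,
                        and_(symbols.c.id == rank.c.symbol_id,
                             rank.c.category == category))
    select_cols.append(rank.c.count.label(f"{category}_rank"))

print(select(select_cols).select_from(current_join))
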
Example #38
    def _fetch(self, params, offset, limit):
        query = select([tbl.concept.c.id,
                        tbl.concept.c.name,
                        tbl.concept.c.scheme_id,
                    ])\
            .select_from(tbl.concept)\
            .order_by(tbl.concept.c.name)

        if 'scheme' in params:
            scheme = params['scheme']
            query = query.where(tbl.concept.c.scheme_id==scheme._alchemy_pk)
        elif 'pks' in params:
            if not params['pks']:
                return []
            query = query.where(tbl.concept.c.id.in_(params['pks']))
        else:
            raise NotImplementedError

        if 'name' in params:
            query = query.where(tbl.concept.c.name==params['name'])

        if offset is not None:
            query = query.offset(offset)

        if limit is not None:
            query = query.limit(limit)

        records = self.session.execute(query).fetchall()
        concepts_d = {}
        concepts_l = []
        pks = []
        for pk, name, scheme_id in records:
            pks.append(pk)
            scheme = self.conn.identity_map.get('schemes', scheme_id)
            if not scheme:
                raise RuntimeError
            concept = Concept(name=name, scheme=scheme)
            concept._alchemy_pk = pk
            concepts_d[pk] = concept
            concepts_l.append(concept)

        # Labels
        query = select([tbl.concept_label.c.id,
                        tbl.concept_label.c.concept_id,
                        tbl.concept_label.c.type,
                        tbl.concept_label.c.lang,
                        tbl.concept_label.c.label, ])\
            .select_from(
                join(tbl.concept, tbl.concept_label,
                     tbl.concept.c.id==tbl.concept_label.c.concept_id)
            )\
            .where(tbl.concept.c.id.in_(pks))\
            .order_by(tbl.concept_label.c.lang,
                      tbl.concept_label.c.type,
                      tbl.concept_label.c.label,)

        if 'name' in params:
            query = query.where(tbl.concept.c.name==params['name'])

        records = self.session.execute(query).fetchall()
        for pk, concept_id, label_type, lang, title in records:
            concepts_d[concept_id].labels._add_raw(lang, label_type, title, pk)

        # Notes
        query = select([tbl.concept_note.c.id,
                        tbl.concept_note.c.concept_id,
                        tbl.concept_note.c.type,
                        tbl.concept_note.c.lang,
                        tbl.concept_note.c.text, ])\
            .select_from(
                join(tbl.concept, tbl.concept_note,
                     tbl.concept.c.id==tbl.concept_note.c.concept_id)
            )\
            .where(tbl.concept.c.id.in_(pks))\
            .order_by(tbl.concept_note.c.lang,
                      tbl.concept_note.c.type,
                      tbl.concept_note.c.text,)

        if 'name' in params:
            query = query.where(tbl.concept.c.name==params['name'])

        records = self.session.execute(query).fetchall()
        for pk, concept_id, note_type, lang, title in records:
            concepts_d[concept_id].notes._add_raw(lang, note_type, title, pk)

        return concepts_l
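
_fetch avoids a query per concept by collecting the parent pks first, then pulling all labels (and, likewise, notes) in a single IN (...) query. A stripped-down sketch of that two-pass shape with assumed table definitions:

from sqlalchemy import (Column, Integer, MetaData, String, Table, join,
                        select)

metadata = MetaData()
concept = Table("concept", metadata,
                Column("id", Integer, primary_key=True),
                Column("name", String))
concept_label = Table("concept_label", metadata,
                      Column("id", Integer, primary_key=True),
                      Column("concept_id", Integer),
                      Column("label", String))

pks = [1, 2, 3]  # gathered from the first query's rows
labels = (select([concept_label.c.concept_id, concept_label.c.label])
          .select_from(join(concept, concept_label,
                            concept.c.id == concept_label.c.concept_id))
          .where(concept.c.id.in_(pks)))
print(labels)  # one query for the labels of every fetched concept
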
Example #39
def add_types(engine: Engine, metadata: Metadata) -> None:
    """
    Tag every object according to its type:

      INSERT INTO tagged_object (tag_id, object_id, object_type)
      SELECT
        tag.id AS tag_id,
        slices.id AS object_id,
        'chart' AS object_type
      FROM slices
      JOIN tag
        ON tag.name = 'type:chart'
      LEFT OUTER JOIN tagged_object
        ON tagged_object.tag_id = tag.id
        AND tagged_object.object_id = slices.id
        AND tagged_object.object_type = 'chart'
      WHERE tagged_object.tag_id IS NULL;

      INSERT INTO tagged_object (tag_id, object_id, object_type)
      SELECT
        tag.id AS tag_id,
        dashboards.id AS object_id,
        'dashboard' AS object_type
      FROM dashboards
      JOIN tag
        ON tag.name = 'type:dashboard'
      LEFT OUTER JOIN tagged_object
        ON tagged_object.tag_id = tag.id
        AND tagged_object.object_id = dashboards.id
        AND tagged_object.object_type = 'dashboard'
      WHERE tagged_object.tag_id IS NULL;

      INSERT INTO tagged_object (tag_id, object_id, object_type)
      SELECT
        tag.id AS tag_id,
        saved_query.id AS object_id,
        'query' AS object_type
      FROM saved_query
      JOIN tag
        ON tag.name = 'type:query'
      LEFT OUTER JOIN tagged_object
        ON tagged_object.tag_id = tag.id
        AND tagged_object.object_id = saved_query.id
        AND tagged_object.object_type = 'query'
      WHERE tagged_object.tag_id IS NULL;

    """

    tag = metadata.tables["tag"]
    tagged_object = metadata.tables["tagged_object"]
    slices = metadata.tables["slices"]
    dashboards = metadata.tables["dashboards"]
    saved_query = metadata.tables["saved_query"]
    columns = ["tag_id", "object_id", "object_type"]

    # add a tag for each object type
    insert = tag.insert()
    for type_ in ObjectTypes.__members__:  # pylint: disable=not-an-iterable
        try:
            engine.execute(insert, name=f"type:{type_}", type=TagTypes.type)
        except IntegrityError:
            pass  # already exists

    charts = (select([
        tag.c.id.label("tag_id"),
        slices.c.id.label("object_id"),
        literal(ObjectTypes.chart.name).label("object_type"),
    ]).select_from(
        join(
            join(slices, tag, tag.c.name == "type:chart"),
            tagged_object,
            and_(
                tagged_object.c.tag_id == tag.c.id,
                tagged_object.c.object_id == slices.c.id,
                tagged_object.c.object_type == "chart",
            ),
            isouter=True,
            full=False,
        )).where(tagged_object.c.tag_id.is_(None)))
    query = tagged_object.insert().from_select(columns, charts)
    engine.execute(query)

    dashboards = (select([
        tag.c.id.label("tag_id"),
        dashboards.c.id.label("object_id"),
        literal(ObjectTypes.dashboard.name).label("object_type"),
    ]).select_from(
        join(
            join(dashboards, tag, tag.c.name == "type:dashboard"),
            tagged_object,
            and_(
                tagged_object.c.tag_id == tag.c.id,
                tagged_object.c.object_id == dashboards.c.id,
                tagged_object.c.object_type == "dashboard",
            ),
            isouter=True,
            full=False,
        )).where(tagged_object.c.tag_id.is_(None)))
    query = tagged_object.insert().from_select(columns, dashboards)
    engine.execute(query)

    saved_queries = (select([
        tag.c.id.label("tag_id"),
        saved_query.c.id.label("object_id"),
        literal(ObjectTypes.query.name).label("object_type"),
    ]).select_from(
        join(
            join(saved_query, tag, tag.c.name == "type:query"),
            tagged_object,
            and_(
                tagged_object.c.tag_id == tag.c.id,
                tagged_object.c.object_id == saved_query.c.id,
                tagged_object.c.object_type == "query",
            ),
            isouter=True,
            full=False,
        )).where(tagged_object.c.tag_id.is_(None)))
    query = tagged_object.insert().from_select(columns, saved_queries)
    engine.execute(query)
Example #40
        try:
            s = query(Service).filter_by(id=service_id).one()
        except InvalidReq, e:
            self.log_w('Service not found: %(service_id)s (%(error)s)',
                       {"service_id": service_id, "error": str(e)})
            raise VaultError("Service not found: %s (%s)" % (service_id,
                                                             str(e)))
        # unused
        #me = query(User).get(self.myself_id)

        # We need no aliasing, because we'll only use `cryptgroupkey`,
        # `cryptsymkey` and `group_id` in there.
        req = sql.join(servicegroups_table, usergroups_table,
                       ServiceGroup.group_id==UserGroup.group_id) \
                 .join(users_table, User.id==UserGroup.user_id) \
                 .select(use_labels=True) \
                 .where(User.id==self.myself_id) \
                 .where(ServiceGroup.service_id==s.id) \
                 .order_by(ServiceGroup.group_id)

        # Deal with group if specified..
        if group_id:
            req = req.where(ServiceGroup.group_id == group_id)

        res = meta.Session.execute(req)

        # Take the first one
        uciphers = list(res)
        if not uciphers:
            ugcgk = ''
            sgcsk = ''
Example #41
def add_favorites(engine: Engine, metadata: Metadata) -> None:
    """
    Tag every object that was favorited:

      INSERT INTO tagged_object (tag_id, object_id, object_type)
      SELECT
        tag.id AS tag_id,
        favstar.obj_id AS object_id,
        LOWER(favstar.class_name) AS object_type
      FROM favstar
      JOIN tag
        ON tag.name = CONCAT('favorited_by:', favstar.user_id)
      LEFT OUTER JOIN tagged_object
        ON tagged_object.tag_id = tag.id
        AND tagged_object.object_id = favstar.obj_id
        AND tagged_object.object_type = LOWER(favstar.class_name)
      WHERE tagged_object.tag_id IS NULL;

    """

    tag = metadata.tables["tag"]
    tagged_object = metadata.tables["tagged_object"]
    users = metadata.tables["ab_user"]
    favstar = metadata.tables["favstar"]
    columns = ["tag_id", "object_id", "object_type"]

    # create a custom tag for each user
    ids = select([users.c.id])
    insert = tag.insert()
    for (id_, ) in engine.execute(ids):
        try:
            engine.execute(insert,
                           name=f"favorited_by:{id_}",
                           type=TagTypes.favorited_by)
        except IntegrityError:
            pass  # already exists

    favstars = (select([
        tag.c.id.label("tag_id"),
        favstar.c.obj_id.label("object_id"),
        func.lower(favstar.c.class_name).label("object_type"),
    ]).select_from(
        join(
            join(
                favstar,
                tag,
                tag.c.name == functions.concat("favorited_by:",
                                               favstar.c.user_id),
            ),
            tagged_object,
            and_(
                tagged_object.c.tag_id == tag.c.id,
                tagged_object.c.object_id == favstar.c.obj_id,
                tagged_object.c.object_type == func.lower(
                    favstar.c.class_name),
            ),
            isouter=True,
            full=False,
        )).where(tagged_object.c.tag_id.is_(None)))
    query = tagged_object.insert().from_select(columns, favstars)
    engine.execute(query)
Example #42
from sqlalchemy.orm import sessionmaker
from model import TT_Report, TT_Location, engine
from sqlalchemy.sql import join

Session = sessionmaker(bind=engine)
session = Session()
q = session.query(TT_Location)
for loc in q:
    print("%s %s %s" % (loc.id, loc.location, loc.long))

columns = [
    TT_Report.img_file, TT_Report.comment, TT_Location.location,
    TT_Location.long, TT_Location.lat
]

q = join(TT_Report, TT_Location,
         TT_Report.location_id == TT_Location.id).select()
q = q.with_only_columns(columns)

for row in q.execute():
    print(dict(row))
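
The script above builds the statement from the Join side: join(...).select() and then with_only_columns(). The same query can be written from the column side with select_from(); a sketch using assumed Core tables in place of the ORM models:

from sqlalchemy import Column, Integer, MetaData, String, Table, join, select

metadata = MetaData()
report = Table("tt_report", metadata,
               Column("id", Integer, primary_key=True),
               Column("img_file", String),
               Column("location_id", Integer))
location = Table("tt_location", metadata,
                 Column("id", Integer, primary_key=True),
                 Column("location", String))

# Equivalent to join(report, location, ...).select().with_only_columns(...)
q = (select([report.c.img_file, location.c.location])
     .select_from(join(report, location,
                       report.c.location_id == location.c.id)))
print(q)
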
Example #43
def main():
    if len(sys.argv) != 3:
        print("Not enough arguments...")
        exit(1)

    write_to = os.path.abspath(sys.argv[2])

    if os.path.exists(write_to) and not os.path.isdir(write_to):
        print("Target already exists, and is not directory")
        exit(-1)

    if os.path.isdir(write_to):
        print("Target already exists, will overwrite files...")
        print("Sleeping for 5 seconds")
        time.sleep(5)
    else:
        os.mkdir(write_to)

    engine = create_engine(sys.argv[1], echo=True)
    conn = engine.connect()

    nodes = select(
        ["node.nid, node.vid, node.title, node_revisions.body, node.created"]
        ).where(
            and_(
                "node.status = 1",
                "node.vid = node_revisions.vid"
                )
        ).select_from("node, node_revisions")

    tags = select(
            ["term_data.name"]
            ).where(
                    and_(
                        "term_node.vid = :vid",
                        "term_node.nid = :nid"
                    )
            ).select_from(
                    join("term_node", "term_data", text("term_node.tid = term_data.tid")))

    url_alias = select(
            ["src, dst"]
            ).where(
                    "url_alias.src = CONCAT('node/', :nid)"
            ).select_from("url_alias")

    for rnode in conn.execute(nodes):
        (nid, vid, title, body, created) = rnode

        created_date = datetime.datetime.fromtimestamp(created)

        node = {
                'nid': nid,
                'vid': vid,
                'title': title,
                'body': body,
                'created': created,
                'created_year': created_date.strftime('%Y'),
                'created_month': created_date.strftime('%m'),
                'created_day': created_date.strftime('%d'),
                'created_formatted': created_date.strftime('%Y-%m-%d %H:%M:%S'),
                'tags': [],
                'alias': '',
                }

        for rtag in conn.execute(tags, vid=vid, nid=nid):
            (tag,) = rtag

            node['tags'].append(tag)

        node['tags'] = ', '.join(node['tags'])
        

        alias = conn.execute(url_alias, nid=nid).first()

        if alias is not None:
            node['alias'] = ', '.join(alias)

        year_dir = os.path.join(write_to, node['created_year'])
        try:
            os.mkdir(year_dir)
        except OSError:
            pass  # the year directory may already exist

        with open(os.path.join(year_dir, '{}-{}-{} - {}.md'.format(node['created_year'], node['created_month'], node['created_day'], node['title']).replace('/', ':')), 'w') as f:
            f.write('---\n')
            f.write(
                yaml.dump(
                {
                    'title': node['title'],
                    'categories': node['tags'],
                    'date': node['created_formatted'],
                    'aliases': node['alias'],
                },
                default_flow_style=False
                )
            )
            f.write('---\n')
            f.write(node['body'])
            f.write('\n')
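
The queries above pass raw SQL strings to select(), where() and select_from(), which very old SQLAlchemy coerced implicitly; newer releases require explicit text() constructs. A sketch of the tags query in that explicit form, keeping the same bound parameters:

from sqlalchemy import select, text

tags = (select([text("term_data.name")])
        .select_from(text("term_node JOIN term_data "
                          "ON term_node.tid = term_data.tid"))
        .where(text("term_node.vid = :vid"))
        .where(text("term_node.nid = :nid")))
# executed the same way as above: conn.execute(tags, vid=vid, nid=nid)
print(tags)
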
Example #44
 def getSemesterReflections(self, semester_id):
     return select([self.student_semester.c.reflection, self.student.c.name, self.semester.c.name],
             self.student_semester.c.semester == semester_id,
             [join(self.student_semester, self.student).join(self.semester)],
             use_labels=True)
Example #45
def placement_get_select(args, res_template, worker_number):
    """
    Returns a sqlalchemy.Select object representing the query against the
    resource_providers schema tables, using derived table queries against the
    allocations and inventories tables. The SQL generated from this looks like
    the following. (If the filter_strategy is not 'db', then the WHERE clause
    is not appended to the SQL):

        SELECT cn.id, cn.generation 
        FROM resource_providers AS cn
        INNER JOIN inventories AS ram_filtered
        ON cn.id = ram_filtered.resource_provider_id
        AND ram_filtered.resource_class_id = 2
        LEFT OUTER JOIN (
          SELECT 
            allocations.resource_provider_id AS resource_provider_id,
            sum(allocations.used) AS used 
          FROM allocations 
          WHERE allocations.resource_class_id = 2
          GROUP BY allocations.resource_provider_id
        ) AS ram_usage
        ON ram_filtered.resource_provider_id = ram_usage.resource_provider_id
        INNER JOIN inventories AS cpu_filtered
        ON ram_filtered.resource_provider_id = cpu_filtered.resource_provider_id
        AND cpu_filtered.resource_class_id = 1
        LEFT OUTER JOIN (
          SELECT
            allocations.resource_provider_id AS resource_provider_id,
            sum(allocations.used) AS used
          FROM allocations 
          WHERE allocations.resource_class_id = 1
          GROUP BY allocations.resource_provider_id
        ) AS cpu_usage
        ON cpu_filtered.resource_provider_id = cpu_usage.resource_provider_id
        WHERE
        ram_filtered.min_unit <= 64
        AND ram_filtered.max_unit >= 64
        AND floor((ram_filtered.total - ram_filtered.reserved) * ram_filtered.allocation_ratio) - ifnull(ram_usage.used, 0) >= 64
        AND cpu_filtered.min_unit <= 1
        AND cpu_filtered.max_unit >= 1
        AND floor((cpu_filtered.total - cpu_filtered.reserved) * cpu_filtered.allocation_ratio) - ifnull(cpu_usage.used, 0) >= 1
        LIMIT 1

    Depending on the partition and placement strategy, there may be additional
    WHERE clauses that look like the following:

        # 'modulo' partition strategy
        AND cn.id % $NUM_WORKERS == $WORKER_NUMBER

        or:

        # 'random' placement strategy
        AND cn.id >= $RANDOM_COMPUTE_NODE_ID

    The ORDER BY clause will depend on the placement strategy and may look like
    the following:

        # 'pack' placement strategy
        ORDER BY IFNULL(ram_usage.used, 0) ASC, cn.id ASC

        or:

        # 'spread' placement strategy
        ORDER BY IFNULL(ram_usage.used, 0) DESC, cn.id ASC
    """
    (rp_tbl, agg_tbl, rp_agg_tbl, inv_tbl, alloc_tbl) = placement_get_tables()
    cn_tbl = sa.alias(rp_tbl, name='cn')
    ram_filtered = sa.alias(inv_tbl, name='ram_filtered')
    cpu_filtered = sa.alias(inv_tbl, name='cpu_filtered')

    ram_usage = sa.select([alloc_tbl.c.resource_provider_id,
                           sql.func.sum(alloc_tbl.c.used).label('used')])
    ram_usage = ram_usage.where(alloc_tbl.c.resource_class_id == const.RAM_MB)
    ram_usage = ram_usage.group_by(alloc_tbl.c.resource_provider_id)
    ram_usage = sa.alias(ram_usage, name='ram_usage')

    cpu_usage = sa.select([alloc_tbl.c.resource_provider_id,
                           sql.func.sum(alloc_tbl.c.used).label('used')])
    cpu_usage = cpu_usage.where(alloc_tbl.c.resource_class_id == const.VCPU)
    cpu_usage = cpu_usage.group_by(alloc_tbl.c.resource_provider_id)
    cpu_usage = sa.alias(cpu_usage, name='cpu_usage')

    ram_inv_join = sql.join(cn_tbl, ram_filtered,
                            sql.and_(
                                cn_tbl.c.id == ram_filtered.c.resource_provider_id,
                                ram_filtered.c.resource_class_id == const.RAM_MB))
    ram_join = sql.outerjoin(ram_inv_join, ram_usage,
                             ram_filtered.c.resource_provider_id == ram_usage.c.resource_provider_id)
    cpu_inv_join = sql.join(ram_join, cpu_filtered,
                            sql.and_(
                                ram_filtered.c.resource_provider_id == cpu_filtered.c.resource_provider_id,
                                cpu_filtered.c.resource_class_id == const.VCPU))
    cpu_join = sql.outerjoin(cpu_inv_join, cpu_usage,
                             cpu_filtered.c.resource_provider_id == cpu_usage.c.resource_provider_id)

    cols_in_output = [cn_tbl.c.id, cn_tbl.c.generation]
    if args.filter_strategy == 'python':
        # When we don't do stuff on the DB side, we need to pass back
        # a whole lot more columns since we have Python code loops
        # that need to process these data fields.
        cols_in_output.extend([
           ram_filtered.c.total.label('ram_total'),
           ram_filtered.c.reserved.label('ram_reserved'),
           ram_filtered.c.min_unit.label('ram_min_unit'),
           ram_filtered.c.max_unit.label('ram_max_unit'),
           ram_filtered.c.allocation_ratio.label('ram_allocation_ratio'),
           ram_usage.c.used.label('ram_used'),
           cpu_filtered.c.total.label('cpu_total'),
           cpu_filtered.c.reserved.label('cpu_reserved'),
           cpu_filtered.c.min_unit.label('cpu_min_unit'),
           cpu_filtered.c.max_unit.label('cpu_max_unit'),
           cpu_filtered.c.allocation_ratio.label('cpu_allocation_ratio'),
           cpu_usage.c.used.label('cpu_used'),
        ])

    select = sa.select(cols_in_output).select_from(cpu_join)

    if args.filter_strategy == 'db':
        where_conds = (
            ram_filtered.c.min_unit <= res_template[const.RAM_MB],
            ram_filtered.c.max_unit >= res_template[const.RAM_MB],
            sql.func.floor((ram_filtered.c.total - ram_filtered.c.reserved) * ram_filtered.c.allocation_ratio)
            - sql.func.ifnull(ram_usage.c.used, 0) >= res_template[const.RAM_MB],
            cpu_filtered.c.min_unit <= res_template[const.VCPU],
            cpu_filtered.c.max_unit >= res_template[const.VCPU],
            sql.func.floor((cpu_filtered.c.total - cpu_filtered.c.reserved) * cpu_filtered.c.allocation_ratio)
            - sql.func.ifnull(cpu_usage.c.used, 0) >= res_template[const.VCPU]
        )
        if args.partition_strategy == 'modulo':
            where_conds += ((cn_tbl.c.id % args.workers) == worker_number,)

        if args.placement_strategy == 'pack':
            select = select.order_by(sql.func.ifnull(ram_usage.c.used, 0), cn_tbl.c.id)
        if args.placement_strategy == 'spread':
            select = select.order_by(sql.func.ifnull(ram_usage.c.used, 0).desc(), cn_tbl.c.id)
        if args.placement_strategy == 'random':
            # The scheduler could keep a cache of the number of compute
            # nodes in the system. But here, we emulate that cache by
            # simply picking a random compute node ID, since we know the
            # compute node IDs are sequential and their total number is
            # fixed.
            num_compute_nodes = (args.rows * args.racks * args.nodes)
            num_shared_resource_pools = args.rows * (1 if args.shared_storage_per_row else 0)
            random_compute_node_id = random.randint(1 + num_shared_resource_pools,
                                                    num_compute_nodes + num_shared_resource_pools)
            where_conds += (cn_tbl.c.id >= random_compute_node_id,)

        select = select.where(sql.and_(*where_conds))
        select = select.limit(1)

    return select
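
Stripped to its core, the FROM clause generated above is an inventory table LEFT OUTER JOINed to an aliased aggregate subquery, so providers with no allocations still appear, with a NULL used value (hence the ifnull() guards in the WHERE clause). A minimal sketch of that shape with made-up tables:

import sqlalchemy as sa
from sqlalchemy import sql

metadata = sa.MetaData()
inventories = sa.Table("inventories", metadata,
                       sa.Column("resource_provider_id", sa.Integer),
                       sa.Column("total", sa.Integer))
allocations = sa.Table("allocations", metadata,
                       sa.Column("resource_provider_id", sa.Integer),
                       sa.Column("used", sa.Integer))

# Aggregate usage per provider as a named derived table.
usage = sa.alias(
    sa.select([allocations.c.resource_provider_id,
               sql.func.sum(allocations.c.used).label("used")])
    .group_by(allocations.c.resource_provider_id),
    name="usage")

# LEFT OUTER JOIN keeps providers with no allocations; their usage.used
# comes back as NULL.
j = sql.outerjoin(
    inventories, usage,
    inventories.c.resource_provider_id == usage.c.resource_provider_id)
print(sa.select([inventories.c.total, usage.c.used]).select_from(j))
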
Example #46
def get_old_messages_backend(request, user_profile,
                             anchor = REQ(converter=int),
                             num_before = REQ(converter=to_non_negative_int),
                             num_after = REQ(converter=to_non_negative_int),
                             narrow = REQ('narrow', converter=narrow_parameter, default=None),
                             use_first_unread_anchor = REQ(default=False, converter=ujson.loads),
                             apply_markdown=REQ(default=True,
                                                converter=ujson.loads)):
    # type: (HttpRequest, UserProfile, int, int, int, Optional[List[Dict[str, Any]]], bool, bool) -> HttpResponse
    include_history = ok_to_include_history(narrow, user_profile.realm)

    if include_history and not use_first_unread_anchor:
        query = select([column("id").label("message_id")], None, table("zerver_message"))
        inner_msg_id_col = literal_column("zerver_message.id")
    elif narrow is None:
        query = select([column("message_id"), column("flags")],
                       column("user_profile_id") == literal(user_profile.id),
                       table("zerver_usermessage"))
        inner_msg_id_col = column("message_id")
    else:
        # TODO: Don't do this join if we're not doing a search
        query = select([column("message_id"), column("flags")],
                       column("user_profile_id") == literal(user_profile.id),
                       join(table("zerver_usermessage"), table("zerver_message"),
                            literal_column("zerver_usermessage.message_id") ==
                            literal_column("zerver_message.id")))
        inner_msg_id_col = column("message_id")

    num_extra_messages = 1
    is_search = False

    if narrow is not None:
        # Add some metadata to our logging data for narrows
        verbose_operators = []
        for term in narrow:
            if term['operator'] == "is":
                verbose_operators.append("is:" + term['operand'])
            else:
                verbose_operators.append(term['operator'])
        request._log_data['extra'] = "[%s]" % (",".join(verbose_operators),)

        # Build the query for the narrow
        num_extra_messages = 0
        builder = NarrowBuilder(user_profile, inner_msg_id_col)
        search_term = None # type: Optional[Dict[str, Any]]
        for term in narrow:
            if term['operator'] == 'search':
                if not is_search:
                    search_term = term
                    query = query.column(column("subject")).column(column("rendered_content"))
                    is_search = True
                else:
                    # Join the search operators if there are multiple of them
                    search_term['operand'] += ' ' + term['operand']
            else:
                query = builder.add_term(query, term)
        if is_search:
            query = builder.add_term(query, search_term)

    # We add 1 to the number of messages requested if no narrow was
    # specified to ensure that the resulting list always contains the
    # anchor message.  If a narrow was specified, the anchor message
    # might not match the narrow anyway.
    if num_after != 0:
        num_after += num_extra_messages
    else:
        num_before += num_extra_messages

    sa_conn = get_sqlalchemy_connection()
    if use_first_unread_anchor:
        condition = column("flags").op("&")(UserMessage.flags.read.mask) == 0

        # We exclude messages on muted topics when finding the first unread
        # message in this narrow
        muting_conditions = exclude_muting_conditions(user_profile, narrow)
        if muting_conditions:
            condition = and_(condition, *muting_conditions)

        first_unread_query = query.where(condition)
        first_unread_query = first_unread_query.order_by(inner_msg_id_col.asc()).limit(1)
        first_unread_result = list(sa_conn.execute(first_unread_query).fetchall())
        if len(first_unread_result) > 0:
            anchor = first_unread_result[0][0]
        else:
            anchor = LARGER_THAN_MAX_MESSAGE_ID

    before_query = None
    after_query = None
    if num_before != 0:
        before_anchor = anchor
        if num_after != 0:
            # Don't include the anchor in both the before query and the after query
            before_anchor = anchor - 1
        before_query = query.where(inner_msg_id_col <= before_anchor) \
                            .order_by(inner_msg_id_col.desc()).limit(num_before)
    if num_after != 0:
        after_query = query.where(inner_msg_id_col >= anchor) \
                           .order_by(inner_msg_id_col.asc()).limit(num_after)

    if anchor == LARGER_THAN_MAX_MESSAGE_ID:
        # There's no need for an after_query if we're targeting just the target message.
        after_query = None

    if before_query is not None:
        if after_query is not None:
            query = union_all(before_query.self_group(), after_query.self_group())
        else:
            query = before_query
    elif after_query is not None:
        query = after_query
    else:
        # This can happen when a narrow is specified.
        query = query.where(inner_msg_id_col == anchor)

    main_query = alias(query)
    query = select(main_query.c, None, main_query).order_by(column("message_id").asc())
    # This is a hack to tag the query we use for testing
    query = query.prefix_with("/* get_old_messages */")
    query_result = list(sa_conn.execute(query).fetchall())

    # The following is a little messy, but ensures that the code paths
    # are similar regardless of the value of include_history.  The
    # 'user_messages' dictionary maps each message to the user's
    # UserMessage object for that message, which we will attach to the
    # rendered message dict before returning it.  We attempt to
    # bulk-fetch rendered message dicts from remote cache using the
    # 'messages' list.
    search_fields = dict() # type: Dict[int, Dict[str, Text]]
    message_ids = [] # type: List[int]
    user_message_flags = {} # type: Dict[int, List[str]]
    if include_history:
        message_ids = [row[0] for row in query_result]

        # TODO: This could be done with an outer join instead of two queries
        user_message_flags = dict((user_message.message_id, user_message.flags_list()) for user_message in
                                  UserMessage.objects.filter(user_profile=user_profile,
                                                             message__id__in=message_ids))
        for row in query_result:
            message_id = row[0]
            if user_message_flags.get(message_id) is None:
                user_message_flags[message_id] = ["read", "historical"]
            if is_search:
                (_, subject, rendered_content, content_matches, subject_matches) = row
                search_fields[message_id] = get_search_fields(rendered_content, subject,
                                                              content_matches, subject_matches)
    else:
        for row in query_result:
            message_id = row[0]
            flags = row[1]
            user_message_flags[message_id] = parse_usermessage_flags(flags)

            message_ids.append(message_id)

            if is_search:
                (_, _, subject, rendered_content, content_matches, subject_matches) = row
                search_fields[message_id] = get_search_fields(rendered_content, subject,
                                                              content_matches, subject_matches)

    cache_transformer = lambda row: MessageDict.build_dict_from_raw_db_row(row, apply_markdown)
    id_fetcher = lambda row: row['id']

    message_dicts = generic_bulk_cached_fetch(lambda message_id: to_dict_cache_key_id(message_id, apply_markdown),
                                              Message.get_raw_db_rows,
                                              message_ids,
                                              id_fetcher=id_fetcher,
                                              cache_transformer=cache_transformer,
                                              extractor=extract_message_dict,
                                              setter=stringify_message_dict)

    message_list = []
    for message_id in message_ids:
        msg_dict = message_dicts[message_id]
        msg_dict.update({"flags": user_message_flags[message_id]})
        msg_dict.update(search_fields.get(message_id, {}))
        message_list.append(msg_dict)

    statsd.incr('loaded_old_messages', len(message_list))
    ret = {'messages': message_list,
           "result": "success",
           "msg": ""}
    return json_success(ret)
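
The before/after window around the anchor is assembled with union_all() of two self-grouped selects, so each half keeps its own ORDER BY and LIMIT inside the union. A reduced sketch of that pattern with an assumed messages table:

from sqlalchemy import (Column, Integer, MetaData, Table, alias, select,
                        union_all)

metadata = MetaData()
messages = Table("messages", metadata,
                 Column("id", Integer, primary_key=True))

anchor, num_before, num_after = 1000, 50, 50
before_query = (select([messages.c.id])
                .where(messages.c.id <= anchor - 1)
                .order_by(messages.c.id.desc()).limit(num_before))
after_query = (select([messages.c.id])
               .where(messages.c.id >= anchor)
               .order_by(messages.c.id.asc()).limit(num_after))

# self_group() parenthesizes each half so its ORDER BY / LIMIT survives
# inside the UNION ALL; the outer select re-sorts the combined window.
combined = alias(union_all(before_query.self_group(),
                           after_query.self_group()))
print(select([combined]).order_by(combined.c.id.asc()))
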
Example #47
def get_jobs_status_from_components(user, topic_id, type_id):

    # List of meaningful job statuses for the global overview.
    #
    # i.e. if the current job status is 'running', we should retrieve the
    # status from the prior job.
    valid_status = ['failure', 'product-failure', 'deployment-failure',
                    'success']

    topic_id = v1_utils.verify_existence_and_get(topic_id, _TABLE, get_id=True)
    v1_utils.verify_team_in_topic(user, topic_id)

    # Get list of all remotecis that are attached to a topic this type belongs
    # to
    fields = [models.REMOTECIS.c.id.label('remoteci_id'),
              models.REMOTECIS.c.name.label('remoteci_name'),
              models.TEAMS.c.id.label('team_id'),
              models.TEAMS.c.name.label('team_name'),
              models.TOPICS.c.name.label('topic_name'),
              models.COMPONENTS.c.id.label('component_id'),
              models.COMPONENTS.c.name.label('component_name'),
              models.COMPONENTS.c.type.label('component_type'),
              models.JOBS.c.id.label('job_id'),
              models.JOBS.c.status.label('job_status'),
              models.JOBS.c.created_at.label('job_created_at')]
    query = (sql.select(fields)
             .select_from(
                 sql.join(
                     models.REMOTECIS,
                     models.JOBS,
                     models.REMOTECIS.c.id == models.JOBS.c.remoteci_id,
                     isouter=True)
             .join(
                 models.JOIN_JOBS_COMPONENTS,
                 models.JOIN_JOBS_COMPONENTS.c.job_id == models.JOBS.c.id)
             .join(
                 models.COMPONENTS,
                 models.COMPONENTS.c.id == models.JOIN_JOBS_COMPONENTS.c.component_id)  # noqa
             .join(
                 models.TOPICS,
                 models.TOPICS.c.id == models.COMPONENTS.c.topic_id)
             .join(
                 models.TEAMS,
                 models.TEAMS.c.id == models.JOBS.c.team_id))
             .where(
                 sql.and_(
                     models.REMOTECIS.c.state == 'active',
                     models.TEAMS.c.external == True,  # noqa
                     models.JOBS.c.status.in_(valid_status),
                     models.JOBS.c.state != 'archived',
                     models.COMPONENTS.c.type == type_id,
                     models.TOPICS.c.id == topic_id))
             .order_by(
                 models.REMOTECIS.c.id,
                 models.JOBS.c.created_at.desc())
             .distinct(models.REMOTECIS.c.id))

    if not user.is_super_admin():
        query.append_whereclause(models.TEAMS.c.id.in_(user.teams))
    rcs = flask.g.db_conn.execute(query).fetchall()
    nb_row = len(rcs)

    return flask.jsonify({'jobs': rcs,
                          '_meta': {'count': nb_row}})
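
The .distinct(models.REMOTECIS.c.id) call, combined with the ORDER BY, emits PostgreSQL's DISTINCT ON, keeping only the newest job per remoteci. A minimal sketch of that idiom with an assumed jobs table:

from sqlalchemy import Column, DateTime, Integer, MetaData, Table, select
from sqlalchemy.dialects import postgresql

metadata = MetaData()
jobs = Table("jobs", metadata,
             Column("id", Integer, primary_key=True),
             Column("remoteci_id", Integer),
             Column("created_at", DateTime))

# DISTINCT ON (remoteci_id) returns the first row of each group under
# the ORDER BY, i.e. the most recent job for each remoteci.
latest = (select([jobs])
          .order_by(jobs.c.remoteci_id, jobs.c.created_at.desc())
          .distinct(jobs.c.remoteci_id))
print(latest.compile(dialect=postgresql.dialect()))
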
Example #48
    def get_items_from_query(self, library):
        """Gets identifiers and its related title, medium, and authors from the
        database.
        Keeps track of the current 'ISBN' identifier and current item object that
        is being processed. If the next ISBN being processed is new, the existing one
        gets added to the list of items. If the ISBN is the same, then we append
        the Author property since there are multiple contributors.

        :return: a list of Novelist objects to send
        """
        collectionList = []
        for c in library.collections:
            collectionList.append(c.id)

        LEFT_OUTER_JOIN = True
        i1 = aliased(Identifier)
        i2 = aliased(Identifier)
        roles = list(Contributor.AUTHOR_ROLES)
        roles.append(Contributor.NARRATOR_ROLE)

        isbnQuery = select(
            [i1.identifier, i1.type, i2.identifier,
            Edition.title, Edition.medium, Edition.published,
            Contribution.role, Contributor.sort_name,
            DataSource.name],
        ).select_from(
            join(LicensePool, i1, i1.id==LicensePool.identifier_id)
            .join(Equivalency, i1.id==Equivalency.input_id, LEFT_OUTER_JOIN)
            .join(i2, Equivalency.output_id==i2.id, LEFT_OUTER_JOIN)
            .join(
                Edition,
                or_(Edition.primary_identifier_id==i1.id, Edition.primary_identifier_id==i2.id)
            )
            .join(Contribution, Edition.id==Contribution.edition_id)
            .join(Contributor, Contribution.contributor_id==Contributor.id)
            .join(DataSource, DataSource.id==LicensePool.data_source_id)
        ).where(
            and_(
                LicensePool.collection_id.in_(collectionList),
                or_(i1.type=="ISBN", i2.type=="ISBN"),
                or_(Contribution.role.in_(roles))
            )
        ).order_by(i1.identifier, i2.identifier)

        result = self._db.execute(isbnQuery)

        items = []
        newItem = None
        existingItem = None
        currentIdentifier = None

        # Loop through the query result. There's a need to keep track of the
        # previously processed object and the currently processed object because
        # the identifier could be the same. If it is, we update the data
        # object to send to Novelist.
        for item in result:
            if newItem:
                existingItem = newItem
            (currentIdentifier, existingItem, newItem, addItem) = (
                self.create_item_object(item, currentIdentifier, existingItem)
            )

            if addItem and existingItem:
                # The Role property isn't needed in the actual request.
                del existingItem['role']
                items.append(existingItem)

        # For the case when there's only one item in `result`
        if newItem:
            del newItem['role']
            items.append(newItem)

        return items
Example #49
def _check_capacity_exceeded(conn, allocs):
    """Checks to see if the supplied allocation records would result in any of
    the inventories involved having their capacity exceeded.

    Raises an InvalidAllocationCapacityExceeded exception if any inventory
    would be exhausted by the allocation. Raises an
    InvalidAllocationConstraintsViolated exception if any of the `step_size`,
    `min_unit` or `max_unit` constraints in an inventory will be violated
    by any one of the allocations.

    If no inventories would be exceeded or violated by the allocations, the
    function returns a list of `ResourceProvider` objects that contain the
    generation at the time of the check.

    :param conn: SQLalchemy Connection object to use
    :param allocs: List of `Allocation` objects to check
    """
    # The SQL generated below looks like this:
    # SELECT
    #   rp.id,
    #   rp.uuid,
    #   rp.generation,
    #   inv.resource_class_id,
    #   inv.total,
    #   inv.reserved,
    #   inv.allocation_ratio,
    #   allocs.used
    # FROM resource_providers AS rp
    # JOIN inventories AS inv
    # ON rp.id = inv.resource_provider_id
    # LEFT JOIN (
    #    SELECT resource_provider_id, resource_class_id, SUM(used) AS used
    #    FROM allocations
    #    WHERE resource_class_id IN ($RESOURCE_CLASSES)
    #    GROUP BY resource_provider_id, resource_class_id
    # ) AS allocs
    # ON inv.resource_provider_id = allocs.resource_provider_id
    # AND inv.resource_class_id = allocs.resource_class_id
    # WHERE rp.uuid IN ($RESOURCE_PROVIDERS)
    # AND inv.resource_class_id IN ($RESOURCE_CLASSES)
    #
    # We then take the results of the above and determine if any of the
    # inventory will have its capacity exceeded.
    rc_ids = set([_RC_CACHE.id_from_string(a.resource_class)
                       for a in allocs])
    provider_uuids = set([a.resource_provider.uuid for a in allocs])

    usage = sa.select([_ALLOC_TBL.c.resource_provider_id,
                       _ALLOC_TBL.c.consumer_id,
                       _ALLOC_TBL.c.resource_class_id,
                       sql.func.sum(_ALLOC_TBL.c.used).label('used')])
    usage = usage.where(_ALLOC_TBL.c.resource_class_id.in_(rc_ids))
    usage = usage.group_by(_ALLOC_TBL.c.resource_provider_id,
                           _ALLOC_TBL.c.resource_class_id)
    usage = sa.alias(usage, name='usage')

    inv_join = sql.join(_RP_TBL, _INV_TBL,
            sql.and_(_RP_TBL.c.id == _INV_TBL.c.resource_provider_id,
                     _INV_TBL.c.resource_class_id.in_(rc_ids)))
    primary_join = sql.outerjoin(inv_join, usage,
        sql.and_(
            _INV_TBL.c.resource_provider_id == usage.c.resource_provider_id,
            _INV_TBL.c.resource_class_id == usage.c.resource_class_id)
    )
    cols_in_output = [
        _RP_TBL.c.id.label('resource_provider_id'),
        _RP_TBL.c.uuid,
        _RP_TBL.c.generation,
        _INV_TBL.c.resource_class_id,
        _INV_TBL.c.total,
        _INV_TBL.c.reserved,
        _INV_TBL.c.allocation_ratio,
        _INV_TBL.c.min_unit,
        _INV_TBL.c.max_unit,
        _INV_TBL.c.step_size,
        usage.c.used,
    ]

    sel = sa.select(cols_in_output).select_from(primary_join)
    sel = sel.where(
            sa.and_(_RP_TBL.c.uuid.in_(provider_uuids),
                    _INV_TBL.c.resource_class_id.in_(rc_ids)))
    records = conn.execute(sel)
    # Create a map keyed by (rp_uuid, res_class) for the records in the DB
    usage_map = {}
    provs_with_inv = set()
    for record in records:
        map_key = (record['uuid'], record['resource_class_id'])
        if map_key in usage_map:
            raise KeyError("%s already in usage_map, bad query" % str(map_key))
        usage_map[map_key] = record
        provs_with_inv.add(record["uuid"])
    # Ensure that all providers have existing inventory
    missing_provs = provider_uuids - provs_with_inv
    if missing_provs:
        class_str = ', '.join([_RC_CACHE.string_from_id(rc_id)
                               for rc_id in rc_ids])
        provider_str = ', '.join(missing_provs)
        raise exception.InvalidInventory(resource_class=class_str,
                resource_provider=provider_str)

    res_providers = {}
    for alloc in allocs:
        rc_id = _RC_CACHE.id_from_string(alloc.resource_class)
        rp_uuid = alloc.resource_provider.uuid
        key = (rp_uuid, rc_id)
        usage = usage_map[key]
        amount_needed = alloc.used
        allocation_ratio = usage['allocation_ratio']
        min_unit = usage['min_unit']
        max_unit = usage['max_unit']
        step_size = usage['step_size']

        # check min_unit, max_unit, step_size
        if (amount_needed < min_unit or amount_needed > max_unit or
                amount_needed % step_size != 0):
            LOG.warning(
                _LW("Allocation for %(rc)s on resource provider %(rp)s "
                    "violates min_unit, max_unit, or step_size. "
                    "Requested: %(requested)s, min_unit: %(min_unit)s, "
                    "max_unit: %(max_unit)s, step_size: %(step_size)s"),
                {'rc': alloc.resource_class,
                 'rp': rp_uuid,
                 'requested': amount_needed,
                 'min_unit': min_unit,
                 'max_unit': max_unit,
                 'step_size': step_size})
            raise exception.InvalidAllocationConstraintsViolated(
                resource_class=alloc.resource_class,
                resource_provider=rp_uuid)

        # usage["used"] can be returned as None
        used = usage['used'] or 0
        capacity = (usage['total'] - usage['reserved']) * allocation_ratio
        if capacity < (used + amount_needed):
            LOG.warning(
                _LW("Over capacity for %(rc)s on resource provider %(rp)s. "
                    "Needed: %(needed)s, Used: %(used)s, Capacity: %(cap)s"),
                {'rc': alloc.resource_class,
                 'rp': rp_uuid,
                 'needed': amount_needed,
                 'used': used,
                 'cap': capacity})
            raise exception.InvalidAllocationCapacityExceeded(
                resource_class=alloc.resource_class,
                resource_provider=rp_uuid)
        if rp_uuid not in res_providers:
            rp = ResourceProvider(id=usage['resource_provider_id'],
                                  uuid=rp_uuid,
                                  generation=usage['generation'])
            res_providers[rp_uuid] = rp
    return list(res_providers.values())
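
The capacity test at the end reduces to capacity = (total - reserved) * allocation_ratio, compared against used + amount_needed. A worked example with made-up numbers:

# (1024 - 64) * 1.5 = 1440.0 units of effective capacity
total, reserved, allocation_ratio = 1024, 64, 1.5
used, amount_needed = 1200, 256

capacity = (total - reserved) * allocation_ratio
# 1200 + 256 = 1456 > 1440, so this allocation would raise
# InvalidAllocationCapacityExceeded.
print(capacity < used + amount_needed)  # True -> over capacity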