コード例 #1
0
ファイル: grids.py プロジェクト: AbhishekKumarSingh/galaxy
 def sort( self, trans, query, ascending, column_name=None ):
     """Order `query` by each item's average user rating.

     Average ratings are computed per item in a subquery and outer-joined
     onto the main query; `coalesce` treats unrated items as 0.  Note the
     ascending/descending branches are deliberately swapped (see TODO below).
     """
     def get_foreign_key( source_class, target_class ):
         """ Returns foreign key in source class that references target class. """
         for fk in source_class.table.foreign_keys:
             if fk.references( target_class.table ):
                 return fk
         # BUG FIX: the original raised the nonexistent name `RuntimeException`
         # and applied '%' to a single value for a two-placeholder format
         # string (itself a TypeError). Raise a real exception with both
         # table names interpolated.
         raise RuntimeError( "No foreign key found between objects: %s, %s"
                             % ( source_class.table, target_class.table ) )
     # Get the columns that connect item's table and item's rating association table.
     item_rating_assoc_class = getattr( trans.model, '%sRatingAssociation' % self.model_class.__name__ )
     foreign_key = get_foreign_key( item_rating_assoc_class, self.model_class )
     fk_col = foreign_key.parent
     referent_col = foreign_key.get_referent( self.model_class.table )
     # Do sorting using a subquery.
     # Subquery to get average rating for each item.
     ave_rating_subquery = trans.sa_session.query( fk_col,
                                                   func.avg( item_rating_assoc_class.table.c.rating ).label('avg_rating') ) \
                                           .group_by( fk_col ) \
                                           .subquery()
     # Integrate subquery into main query.
     query = query.outerjoin( (ave_rating_subquery, referent_col == ave_rating_subquery.columns[fk_col.name]) )
     # Sort using subquery results; use coalesce to avoid null values.
     if not ascending:  # TODO: for now, reverse sorting b/c first sort is ascending, and that should be the natural sort.
         query = query.order_by( func.coalesce( ave_rating_subquery.c.avg_rating, 0 ).asc() )
     else:
         query = query.order_by( func.coalesce( ave_rating_subquery.c.avg_rating, 0 ).desc() )
     return query
コード例 #2
0
ファイル: invoice.py プロジェクト: Imaxinacion/billy
    def refund(self, invoice, amount):
        """Create a refund transaction of `amount` against a settled invoice.

        Locks the invoice row, rejects non-settled invoices, and refuses
        refunds that would exceed the invoice's effective amount (counting
        staged/retrying/done refunds already recorded).  Returns the list of
        created transactions.
        """
        Transaction = tables.Transaction
        tx_model = self.factory.create_transaction_model()

        # Lock the invoice row so concurrent refunds cannot race.
        self.get(invoice.guid, with_lockmode='update')

        if invoice.status != self.statuses.SETTLED:
            raise InvalidOperationError('You can only refund a settled invoice')

        # Sum refunds that are already in flight or completed.
        counted_statuses = [
            TransactionModel.submit_statuses.STAGED,
            TransactionModel.submit_statuses.RETRYING,
            TransactionModel.submit_statuses.DONE,
        ]
        already_refunded = self.session.query(
            func.coalesce(func.sum(Transaction.amount), 0)
        ).filter(
            Transaction.invoice == invoice,
            Transaction.transaction_type == TransactionModel.types.REFUND,
            Transaction.submit_status.in_(counted_statuses),
        ).scalar()

        # Make sure do not allow refund more than effective amount
        if already_refunded + amount > invoice.effective_amount:
            raise InvalidOperationError(
                'Refund total amount {} + {} will exceed invoice effective amount {}'
                .format(already_refunded, amount, invoice.effective_amount)
            )

        # The debit transaction that settled this invoice.
        settled_transaction = self.session.query(Transaction).filter(
            Transaction.invoice == invoice,
            Transaction.transaction_type == TransactionModel.types.DEBIT,
            Transaction.submit_status == TransactionModel.submit_statuses.DONE,
        ).one()

        # Record the refund, referencing the settled debit.
        refund_transaction = tx_model.create(
            invoice=invoice,
            transaction_type=TransactionModel.types.REFUND,
            amount=amount,
            reference_to=settled_transaction,
        )
        return [refund_transaction]
コード例 #3
0
ファイル: tables.py プロジェクト: remotesyssupport/billy
    def total_adjustment_amount(self):
        """Return the summed amount of all adjustments on this invoice.

        Yields 0 (not None) when the invoice has no adjustments, via coalesce.
        """
        from sqlalchemy import func
        session = object_session(self)
        query = session.query(func.coalesce(func.sum(Adjustment.amount), 0))
        query = query.filter(Adjustment.invoice_guid == self.guid)
        return query.scalar()
コード例 #4
0
 def contents(self, ctx, rev=None, **kwargs):
     """Return the Revision matching `rev`, or the latest non-draft revision.

     `rev` of None or 'head' (any case) selects the newest revision whose
     draft flag is not set; otherwise the exact version is looked up.
     Returns None when nothing matches.
     """
     base = ctx.db.query(Revision).filter(Revision.script_id == self.id)
     if rev and str(rev).lower() != 'head':
         found = base.filter(Revision.version == rev).first()
     else:
         # Treat NULL draft as False so legacy rows are still considered.
         found = (base
                  .filter(func.coalesce(Revision.draft, False) != True)  # noqa
                  .order_by(Revision.created_at.desc())
                  .first())
     return found or None
コード例 #5
0
ファイル: team.py プロジェクト: Immortalem/fluxscoreboard
def update_score(connection, update_all=True):
    """
    Update the score of all teams. If ``update_all`` is set, the points
    for all challenges are updated beforehand as well.

    This is your one-shot function to create up-to-date points for everything.
    """
    from fluxscoreboard.models import dynamic_challenges
    if update_all:
        update_challenge_points(connection, update_team_count=True)

    def _per_team(stmt):
        # Restrict an aggregate select to the current team's solved,
        # published, non-dynamic challenges (correlated on Team).
        return (stmt.
                where(Challenge.id == Submission.challenge_id).
                where(Team.id == Submission.team_id).
                where(~Challenge.dynamic).
                where(Challenge.published).
                correlate(Team))

    # Bonus: difference between current and base points, summed per team.
    bonus_total = func.sum(Challenge._points - Challenge.base_points)
    bonus_score = _per_team(select([func.coalesce(bonus_total, 0)]))

    # Base score: challenge base points plus first-blood bonuses, plus
    # whatever each dynamic challenge module contributes.
    total_col = (func.coalesce(func.sum(Challenge.base_points), 0) +
                 func.coalesce(func.sum(Submission.additional_pts), 0))
    for module in dynamic_challenges.registry.values():
        total_col = total_col + module.get_points_query(Team)
    base_score = _per_team(select([total_col]))

    connection.execute(
        Team.__table__.update().
        where(Team.active).
        values(base_score=base_score, bonus_score=bonus_score))
コード例 #6
0
ファイル: copr_scheduler.py プロジェクト: msimacek/koschei
    def main(self):
        """Run one scheduling pass.

        Picks the head of the copr rebuild-request queue and schedules its
        next rebuild, unless the configured concurrent-build limit is already
        reached. The selected user's requests are pushed to the queue end to
        round-robin between users.
        """
        running_builds = self.db.query(CoprRebuild)\
            .filter(CoprRebuild.state == Build.RUNNING)\
            .count()
        # Respect the configured concurrency limit before scheduling more.
        if running_builds >= get_config('copr.max_builds'):
            self.log.debug("{} running builds, not scheduling".format(running_builds))
            return

        # Next queue index: one past the current maximum (0 when empty).
        last_index = self.db.query(
            func.coalesce(
                func.max(CoprRebuildRequest.scheduler_queue_index),
                0
            )
        ).scalar() + 1
        # Requests never queued yet (NULL index) sort first.
        request = self.db.query(CoprRebuildRequest)\
            .order_by(CoprRebuildRequest.scheduler_queue_index.nullsfirst())\
            .filter(CoprRebuildRequest.state == 'in progress')\
            .first()
        if not request:
            self.log.debug("No schedulable requests")
            return

        # move all requests of given user to the queue end
        self.db.query(CoprRebuildRequest)\
            .filter(CoprRebuildRequest.user_id == request.user_id)\
            .update({'scheduler_queue_index': last_index})

        # Count rebuilds of this request already submitted to copr.
        build_count = self.db.query(CoprRebuild)\
            .filter(CoprRebuild.request_id == request.id)\
            .filter(CoprRebuild.copr_build_id != None)\
            .count()
        if build_count >= request.schedule_count:
            request.state = 'scheduled'
            self.db.commit()
            return

        # NOTE(review): this query is not filtered by request_id — it picks
        # the first unsubmitted rebuild overall, ordered by CoprRebuild.order.
        # Presumably it should be restricted to the selected request; confirm.
        rebuild = self.db.query(CoprRebuild)\
            .filter(CoprRebuild.copr_build_id == None)\
            .order_by(CoprRebuild.order)\
            .first()

        if not rebuild:
            request.state = 'scheduled'
            self.db.commit()
        else:
            self.schedule_rebuild(rebuild)
コード例 #7
0
def revision_before_insert(mapper, connection, target):
    """SQLAlchemy before-insert hook: assign the next version number.

    For a non-draft revision with a resolvable script id but no explicit
    version, look up the latest existing version for that script and set
    target.version to latest + 1 (or 1 when no revision exists yet).
    """
    script_id = target.script_id or (target.script.id if target.script else None)

    if script_id and not target.version and not target.draft:
        next_version = select(
            [func.coalesce(
                select([cast(Revision.version, Integer) + 1], limit=1).
                where(Revision.script_id == script_id).
                group_by(Revision.id).
                having(Revision.id == func.max(Revision.id)).
                order_by(Revision.id.desc()).
                as_scalar(), 1)])
        target.version = connection.scalar(next_version)
コード例 #8
0
ファイル: account.py プロジェクト: ToroidalATLAS/openspending
    def scoreboard(self, format='html'):
        """Render a scoreboard of users ranked by a maintenance score.

        Each dataset contributes 10 points split evenly among its
        maintainers; a user's score sums the shares of every dataset they
        maintain. This over-rewards lone maintainers (single points of
        failure), but serves as an adequate first cut and is restricted to
        administrators, who may want to find exactly those cases.
        """
        # Only administrators may see this page.
        if not (c.account and c.account.admin):
            abort(403, _("You are not authorized to view this page"))

        # Per-dataset score: 10 divided by the number of maintainers.
        dataset_score = (db.session.query(
                Dataset.id, (10 / func.count(Account.id)).label('sum'))
            .join('managers')
            .group_by(Dataset.id)
            .subquery())

        # Rank users by the summed scores of the datasets they maintain;
        # coalesce so users with no datasets score 0, and exclude the
        # special 'system' account.
        ranked = (db.session.query(
                Account.name, Account.email,
                func.coalesce(func.sum(dataset_score.c.sum), 0).label('score'))
            .outerjoin(Account.datasets)
            .outerjoin(dataset_score)
            .group_by(Account.name, Account.email)
            .filter(Account.name != 'system')
            .order_by(desc('score')))

        # Fetch everything and paginate — 42 users per page, just because
        # that's an awesome number.
        scores = ranked.all()
        c.page = templating.Page(scores, items_per_page=42,
                                 item_count=len(scores),
                                 **request.params)

        return templating.render('account/scoreboard.html')
コード例 #9
0
ファイル: views.py プロジェクト: pombredanne/glottolog3
def getLanguoids(name=False,
                 iso=False,
                 namequerytype='part',
                 country=False,
                 multilingual=False,
                 inactive=False):
    """Return languoids matching the given criteria.

    Returns [] when no usable criterion is supplied (and for country-only
    queries, which are not implemented); otherwise a query object.
    """
    if not (name or iso or country):
        return []

    query = (DBSession.query(Languoid)
             .options(joinedload(Languoid.family))
             .order_by(Languoid.name))

    if not inactive:
        query = query.filter(Language.active == True)

    if name:
        lowered = name.lower()
        # Unknown namequerytype values fall back to substring ('part') match.
        if namequerytype == 'regex':
            namefilter = func.lower(Identifier.name).like(lowered)
        elif namequerytype == 'whole':
            namefilter = func.lower(Identifier.name) == lowered
        else:
            namefilter = func.lower(Identifier.name).contains(lowered)

        conditions = [Identifier.type == 'name', namefilter]
        if not multilingual:
            # Restrict to identifiers without a language or in English.
            conditions.append(
                func.coalesce(Identifier.lang, '').in_((u'', u'eng', u'en')))
        return query.filter(Language.identifiers.any(and_(*conditions)))
    if country:
        return []  # pragma: no cover
    return (query.join(LanguageIdentifier, Identifier)
            .filter(Identifier.name.contains(iso.lower()))
            .filter(Identifier.type == IdentifierType.iso.value))
コード例 #10
0
ファイル: views.py プロジェクト: clld/glottolog3
def getLanguoids(name=False,
                 iso=False,
                 namequerytype='part',
                 country=False,
                 multilingual=False,
                 inactive=False):
    """Return languoids matching the given criteria.

    Returns [] when no usable criterion is supplied (and for country-only
    queries, which are not implemented); otherwise a query object.
    """
    if not (name or iso or country):
        return []

    query = (DBSession.query(Languoid)
             .options(joinedload(Languoid.family))
             .order_by(Languoid.name))

    if not inactive:
        query = query.filter(Language.active == True)

    if name:
        conditions = [Identifier.type == 'name']
        # Compare case- and accent-insensitively on both sides.
        ident_norm = func.unaccent(func.lower(Identifier.name))
        target_norm = func.unaccent(name.lower())
        if namequerytype == 'whole':
            conditions.append(ident_norm == target_norm)
        else:
            conditions.append(ident_norm.contains(target_norm))
        if not multilingual:
            # Restrict to identifiers without a language or in English.
            conditions.append(
                func.coalesce(Identifier.lang, '').in_((u'', u'eng', u'en')))
        by_identifier = Language.identifiers.any(and_(*conditions))
        # Match either the languoid's own name or any of its identifiers.
        query = query.filter(or_(icontains(Languoid.name, name), by_identifier))
    elif country:
        return []  # pragma: no cover
    else:
        query = (query.join(LanguageIdentifier, Identifier)
                 .filter(Identifier.type == IdentifierType.iso.value)
                 .filter(Identifier.name.contains(iso.lower())))
    return query
コード例 #11
0
def get_status_by_entity():
    """Count enabled users per (corporation, direction), bucketed by level.

    Returns a dict mapping (corporation_id, direction_id) to a dict with
    keys 'status_level0'..'status_level5'; keys for empty buckets are
    simply absent. Levels are point ranges: exactly 0, then (0,100],
    (100,200], (200,2000], (2000,10000], and >10000.
    """
    result = {}

    # FIXME: use user.status_level instead of duplicating the levels scale
    q_users = (session.query(
        UserData.uid,
        UserData.corporation_id.label('corporation_id'),
        UserData.direction_id.label('direction_id'),
        func.sum(func.coalesce(PointData.nb_points, 0)).label('points')
    ).filter(or_(PointData.label == None, PointData.label != u'GIFT_BOUGHT')))

    q_users = (
        q_users.filter(UserData.enabled == True)
        .group_by(UserData.corporation_id, UserData.direction_id, UserData.uid)
        .outerjoin(UserData.points)
        .subquery())

    # Per-entity user counts; filtered further per level below.
    q = (session.query(q_users.c.corporation_id, q_users.c.direction_id,
                       func.count(q_users.c.uid).label('user_count'))
         .group_by(q_users.c.corporation_id, q_users.c.direction_id)
         .order_by(q_users.c.corporation_id, q_users.c.direction_id))

    def _tally(level_query, level_key):
        # Merge one level's per-entity counts into the result dict.
        for corporation_id, direction_id, user_count in level_query:
            entry = result.setdefault((corporation_id, direction_id), {})
            entry[level_key] = user_count

    # DRY FIX: the original repeated this filter-and-merge loop six times.
    _tally(q.filter(q_users.c.points == 0), 'status_level0')
    bounds = [(0, 100), (100, 200), (200, 2000), (2000, 10000)]
    for level, (lo, hi) in enumerate(bounds, start=1):
        _tally(q.filter(q_users.c.points > lo).filter(q_users.c.points <= hi),
               'status_level%d' % level)
    _tally(q.filter(q_users.c.points > 10000), 'status_level5')

    return result
コード例 #12
0
def visit_hierarchy(element, compiler, **kw):
    """visit compilation idiom for pgsql

    Compile ``element`` into a PostgreSQL ``WITH RECURSIVE`` query string.
    The generated query walks the parent/child hierarchy (optionally from
    ``element.starting_node``), tracks visited ids in a ``connect_path``
    array used for both cycle detection and ordering, and exposes ``level``,
    ``connect_path``, ``cycle`` and ``is_leaf`` columns.
    Raises HierarchyLesserError for pgsql versions below the supported one.
    """
    if compiler.dialect.server_version_info < supported_db['postgresql']:
        raise(HierarchyLesserError(compiler.dialect.name,
                                   supported_db['postgresql']))
    else:
        # Pick the foreign-key column type and a dummy literal of that type,
        # used below when coalescing a NULL parent.
        if element.fk_type == String:
            element.fk_type = String(element.type_length)
            val = "a"
        else:
            element.fk_type = Integer
            val = "0"
        ordering_colname = element.ordering_colname
        # NOTE(review): by operator precedence this condition parses as
        # ``not ordering_colname or (A and B)`` — presumably intended, since
        # the column only needs to be missing from both sources; confirm.
        if not ordering_colname or ordering_colname not in element.table.c\
                and ordering_colname not in element.select.c:
            ordering_colname = None
        # FIXME: pass type of ordering column in following call if it's not 
        # Integer
        is_ordering = ordering_colname and ordering_colname in \
                element.select.columns
        rec = _build_table_clause(element.select, 'rec', 
                element.fk_type, ordering_colname if is_ordering else None)
        # documentation used for pgsql >= 8.4.0
        #
        # * http://www.postgresql.org/docs/8.4/static/queries-with.html
        # * http://explainextended.com/2009/07/17/
        #   postgresql-8-4-preserving-order-for-hierarchical-query/
        #
        # build the first select
        sel1 = element.select._clone()
        sel1._copy_internals()
        # if the user wants to start from a given node, he pass the
        # starting_node option in the query
        if hasattr(element, 'starting_node') and \
           getattr(element, 'starting_node') is not False:
            sel1 = sel1.where(func.coalesce(
                literal_column(element.parent, type_=String),
                literal(val, type_=String))==\
                literal(element.starting_node, type_=String))
        # the same select submitted by the user plus a 1 as the first level and
        # an array with the current id
        sel1.append_column(literal_column('1', type_=Integer).label('level'))
        sel1.append_column(literal_column('ARRAY[%s]' %(element.child), 
                                          type_=ARRAY(element.fk_type)).\
                           label('connect_path'))
        if is_ordering:
            # Track the ordering column values along the path as well, so the
            # final result can be ordered hierarchically by that column.
            ordering_col = sel1.c.get(ordering_colname, None)
            if ordering_col is None:
                ordering_col = element.table.c[ordering_colname]
            sel1.append_column(literal_column('ARRAY[%s]' % (ordering_colname,),
                                              type_=ARRAY(ordering_col.type)).\
                               label('%s_path' % (ordering_colname,))
            )
        # the non recursive part of the with query must return false for the
        # first values
        sel1.append_column(literal_column("false", type_=Boolean).\
                           label('cycle'))
        # build the second select
        # the same select as above plus the level column is summing a 1 for
        # each iteration on the same brach. We also append the current id to
        # the array of ids we're building as connect_path
        sel2 = element.select._clone()
        sel2._copy_internals()
        sel2.append_column(label('level', 
                                 rec.c.level+literal_column("1",
                                                            type_=Integer)))
        sel2.append_column(label('connect_path', 
                      func.array_append(rec.c.connect_path, 
                                        getattr(element.table.c, element.child))
                                ))
        if is_ordering:
            sel2.append_column(label('%s_path' % (ordering_colname),
                      func.array_append(rec.c['%s_path' % (ordering_colname,)],
                                        getattr(element.table.c, 
                                            ordering_colname))
                                ))
        # check if any member of connect_path has already been visited and
        # return true in that case, preventing an infinite loop (see where
        # section
        sel2.append_column(literal_column(
                "%s=ANY(connect_path)" % getattr(element.table.c, 
                                                 element.child)).label('cycle'))
        sel2 = sel2.where(and_(
            getattr(element.table.c,element.parent)==getattr(rec.c,
                                                             element.child),
            "not cycle"))
        # union_all the previous queries so we can wrapped them in the 'with
        # recursive .. ()' idiom
        sel3 = sel1.union_all(sel2)
        # adding comparison in connect_path to build the is_leaf param
        new_sel = select([rec])
        # we know if a given node is a leaf by checking if the current
        # connect_path (an array of ids from the root to the present id) is
        # contained by the next row connect_path (we use the lead windowing
        # function for that). If it's contained it means the current id is not
        # a leaf, otherwise it is. 
        new_sel.append_column(
            literal_column("case connect_path <@ lead(connect_path, 1) over "\
                           "(order by connect_path) when true then false "\
                           "else true end").label('is_leaf')
        )
        # NOTE(review): ``new_sel`` is interpolated via ``%s`` (its str()),
        # not via ``compiler.process`` like ``sel3`` — presumably relying on
        # default string compilation matching this dialect; confirm.
        qry = "with recursive rec as (%s)\n%s\norder by %s_path" %\
                (compiler.process(sel3),
                 new_sel,
                 ordering_colname if is_ordering else 'connect'
                )
        if kw.get('asfrom', False):
            qry = '(%s)' % qry
        return qry
コード例 #13
0
def generate_legs(keepto=None, maxtime=None, repair=False):
    """Record legs from stops and mobile activity found in device telemetry.

    keepto -- keep legs before this time, except last two or so for restart
    maxtime -- process device data up to this time
    repair -- re-evaluate and replace all changed legs

    Phase 1 regenerates per-device legs from raw device_data points (with a
    rewind window so a restart stays consistent with the previous run);
    phase 2 attaches device legs to their owning users, detaching shorter
    overlapping legs; finally, clustering and place-labelling backlogs are
    processed in bounded batches.
    """

    now = datetime.datetime.now()
    if not keepto:
        keepto = now
    if not maxtime:
        maxtime = now

    print("generate_legs up to", maxtime)

    dd = db.metadata.tables["device_data"]
    legs = db.metadata.tables["legs"]

    # NOTE(review): the positional select/insert/update/delete forms used
    # throughout (e.g. ``select([cols], whereclause, from_obj)``,
    # ``legs.update(where, values)``) are the legacy SQLAlchemy API —
    # confirm the pinned SQLAlchemy version still supports them.

    # Find first and last point sent from each device.
    devmax = select(
        [   dd.c.device_id,
            func.min(dd.c.time).label("firstpoint"),
            func.max(dd.c.time).label("lastpoint")],
        dd.c.time < maxtime,
        group_by=dd.c.device_id).alias("devmax")

    # The last recorded leg transition may be to phantom move that, given more
    # future context, will be merged into a preceding stop. Go back two legs
    # for the rewrite start point.

    # Due to activity summing window context and stabilization, and stop
    # entry/exit refinement, the first transition after starting the filter
    # process is not necessarily yet in sync with the previous run. Go back
    # another two legs to start the process.

    # (The window bounds expression is not supported until sqlalchemy 1.1 so
    # sneak it in in the order expression...)
    order = text("""time_start DESC
        ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING""")
    rrlegs = select(
        [   legs.c.device_id,
            func.nth_value(legs.c.time_start, 4) \
                .over(partition_by=legs.c.device_id, order_by=order) \
                .label("rewind"),
            func.nth_value(legs.c.time_start, 2) \
                .over(partition_by=legs.c.device_id, order_by=order) \
                .label("rewrite")],
        and_(legs.c.activity != None, legs.c.time_start <= keepto),
        distinct=True).alias("rrlegs")

    # Find end of processed legs, including terminator for each device.
    lastleg = select(
        [legs.c.device_id, func.max(legs.c.time_end).label("time_end")],
        legs.c.time_start < keepto,
        group_by=legs.c.device_id).alias("lastleg")

    # If trailing points exist, start from rewind leg, or first point
    starts = select(
        [   devmax.c.device_id,
            func.coalesce(rrlegs.c.rewind, devmax.c.firstpoint),
            func.coalesce(rrlegs.c.rewrite, devmax.c.firstpoint)],
        or_(lastleg.c.time_end == None,
            devmax.c.lastpoint > lastleg.c.time_end),
        devmax \
            .outerjoin(rrlegs, devmax.c.device_id == rrlegs.c.device_id) \
            .outerjoin(lastleg, devmax.c.device_id == lastleg.c.device_id))

    # In repair mode, just start from the top.
    if repair:
        starts = select([
            devmax.c.device_id,
            devmax.c.firstpoint.label("rewind"),
            devmax.c.firstpoint.label("start")])

    starts = starts.order_by(devmax.c.device_id)
    for device, rewind, start in db.engine.execute(starts):
        # Pull all raw points for this device from the rewind point onward.
        query = select(
            [   func.ST_AsGeoJSON(dd.c.coordinate).label("geojson"),
                dd.c.accuracy,
                dd.c.time,
                dd.c.device_id,
                dd.c.activity_1, dd.c.activity_1_conf,
                dd.c.activity_2, dd.c.activity_2_conf,
                dd.c.activity_3, dd.c.activity_3_conf],
            and_(
                dd.c.device_id == device,
                dd.c.time >= rewind,
                dd.c.time < maxtime),
            order_by=dd.c.time)

        points = db.engine.execute(query).fetchall()

        print("d"+str(device), "resume", str(start)[:19], \
            "rewind", str(rewind)[:19], str(len(points))+"p")

        filterer = DeviceDataFilterer() # not very objecty rly
        lastend = None
        newlegs = filterer.generate_device_legs(points, start)

        # Iterate legs pairwise so each leg knows its predecessor; the
        # leading (None, None) sentinel makes the first pair well-formed.
        for (prevleg, _), (leg, legmodes) in pairwise(
                chain([(None, None)], newlegs)):

          # One transaction per leg, so partial progress survives a crash.
          with db.engine.begin() as t:

            lastend = leg["time_end"]

            print(" ".join([
                "d"+str(device),
                str(leg["time_start"])[:19],
                str(leg["time_end"])[:19],
                leg["activity"]]), end=' ')

            # Adjust leg for db entry
            gj0 = leg.pop("geojson_start", None)
            gj1 = leg.pop("geojson_end", None)
            leg.update({
                "device_id": device,
                "coordinate_start": gj0 and func.ST_GeomFromGeoJSON(gj0),
                "coordinate_end": gj1 and func.ST_GeomFromGeoJSON(gj1)})

            # Deal with overlapping legs on rewind/repair
            legid = t.execute(select(
                [legs.c.id],
                and_(*(legs.c[c] == leg[c] for c in list(leg.keys()))))).scalar()
            if legid:
                print("-> unchanged", end=' ')
            else:
                # Replace the first overlapping stored leg, delete the rest.
                overlapstart = prevleg and prevleg["time_end"] or start
                overlaps = [x[0] for x in t.execute(select(
                    [legs.c.id],
                    and_(
                        legs.c.device_id == leg["device_id"],
                        legs.c.time_start < leg["time_end"],
                        legs.c.time_end > overlapstart),
                    order_by=legs.c.time_start))]
                if overlaps:
                    legid, dels = overlaps[0], overlaps[1:]
                    t.execute(legs.update(legs.c.id == legid, leg))
                    print("-> update", end=' ')
                    if dels:
                        t.execute(legs.delete(legs.c.id.in_(dels)))
                        print("-> delete %d" % len(dels))
                else:
                    ins = legs.insert(leg).returning(legs.c.id)
                    legid = t.execute(ins).scalar()
                    print("-> insert", end=' ')

            # Delete mismatching modes, add new modes
            modes = db.metadata.tables["modes"]
            exmodes = {x[0]: x[1:] for x in t.execute(select(
                [modes.c.source, modes.c.mode, modes.c.line],
                legs.c.id == legid,
                legs.join(modes)))}
            for src in set(exmodes).union(legmodes):
                ex, nu = exmodes.get(src), legmodes.get(src)
                if nu == ex:
                    continue
                if ex is not None:
                    print("-> del", src, ex, end=' ')
                    t.execute(modes.delete(and_(
                        modes.c.leg == legid, modes.c.source == src)))
                if nu is not None:
                    print("-> ins", src, nu, end=' ')
                    t.execute(modes.insert().values(
                        leg=legid, source=src, mode=nu[0], line=nu[1]))

            print()

        # Emit null activity terminator leg to mark trailing undecided points,
        # if any, to avoid unnecessary reprocessing on resume.
        rejects = [x for x in points if not lastend or x["time"] > lastend]
        if rejects:
            db.engine.execute(legs.delete(and_(
                legs.c.device_id == device,
                legs.c.time_start <= rejects[-1]["time"],
                legs.c.time_end >= rejects[0]["time"])))
            db.engine.execute(legs.insert({
                "device_id": device,
                "time_start": rejects[0]["time"],
                "time_end": rejects[-1]["time"],
                "activity": None}))

    # Attach device legs to users.
    devices = db.metadata.tables["devices"]

    # Real legs from devices with the owner added in, also when unattached
    owned = select(
        [   devices.c.user_id.label("owner"),
            legs.c.id,
            legs.c.user_id,
            legs.c.time_start,
            legs.c.time_end],
        and_(legs.c.activity != None, legs.c.time_end < maxtime),
        devices.join(legs, devices.c.id == legs.c.device_id))

    detached = owned.where(legs.c.user_id.is_(None)).alias("detached")
    attached = owned.where(legs.c.user_id.isnot(None)).alias("attached")
    owned = owned.alias("owned")

    # Find most recently received leg attached per user
    maxattached = select(
        [attached.c.owner, func.max(attached.c.id).label("id")],
        group_by=attached.c.owner).alias("maxattached")

    # Find start of earliest unattached leg received later
    mindetached = select(
        [   detached.c.owner,
            func.min(detached.c.time_start).label("time_start")],
        or_(maxattached.c.id.is_(None), detached.c.id > maxattached.c.id),
        detached.outerjoin(
            maxattached, detached.c.owner == maxattached.c.owner),
        group_by=detached.c.owner).alias("mindetached")

    # Find start of attached overlapping leg to make it visible to the process
    overattached = select(
        [   attached.c.owner,
            func.min(attached.c.time_start).label("time_start")],
        from_obj=attached.join(mindetached, and_(
            attached.c.owner == mindetached.c.owner,
            attached.c.time_end > mindetached.c.time_start)),
        group_by=attached.c.owner).alias("overattached")

    # Find restart point
    starts = select(
        [   mindetached.c.owner,
            func.least(mindetached.c.time_start, overattached.c.time_start)],
        from_obj=mindetached.outerjoin(
            overattached, mindetached.c.owner == overattached.c.owner))

    # In repair mode, just start from the top.
    if repair:
        starts = select(
            [owned.c.owner, func.min(owned.c.time_start)],
            group_by=owned.c.owner)

    for user, start in db.engine.execute(starts.order_by(column("owner"))):
        # Ignore the special legacy user linking userless data
        if user == 0:
            continue

        print("u"+str(user), "start attach", start)

        # Get legs from user's devices in end time order, so shorter
        # legs get attached in favor of longer legs from a more idle device.
        s = select(
            [   owned.c.id,
                owned.c.time_start,
                owned.c.time_end,
                owned.c.user_id],
            and_(owned.c.owner == user, owned.c.time_start >= start),
            order_by=owned.c.time_end)

        # Attach non-overlapping legs to the user; detach legs that overlap
        # an already-attached earlier-ending leg.
        lastend = None
        for lid, lstart, lend, luser in db.engine.execute(s):
            print(" ".join(["u"+str(user), str(lstart)[:19], str(lend)[:19]]), end=' ')
            if lastend and lstart < lastend:
                if luser is None:
                    print("-> detached")
                    continue
                db.engine.execute(legs.update(
                    legs.c.id==lid).values(user_id=None)) # detach
                print("-> detach")
                continue
            lastend = lend
            if luser == user:
                print("-> attached")
                continue
            db.engine.execute(legs.update(
                legs.c.id==lid).values(user_id=user)) # attach
            print("-> attach")

    # Cluster backlog in batches
    cluster_legs(1000)

    # Reverse geocode labels for places created or shifted by new legs
    label_places(60)
コード例 #14
0
def list_audits():
    """Return a paginated, filtered JSON list of audit events.

    Recognised query-string parameters (all optional):
      * ``page`` / ``per_page`` — pagination controls.
      * ``earliest_for_each_object`` — boolean; collapse results to the
        earliest event per (object_type, object_id) via DISTINCT ON.
      * ``audit-date`` — restrict to events created on that calendar day.
      * ``audit-type`` — restrict to a single valid audit type.
      * ``user`` — restrict to events recorded for that user.
      * ``data-supplier-id`` — match ``data['supplierId']``/``data['supplier_id']``.
      * ``acknowledged`` — 'true'/'false'/'all' acknowledged-state filter.
      * ``object-type`` / ``object-id`` — restrict to events attached to a
        specific referenced object (``object-id`` is the *external* id).
      * ``latest_first`` — boolean; reverse the created_at/id sort order.

    Returns a ``(response, 200)`` tuple; aborts with 400 on any invalid
    parameter and 404 when a requested object does not exist.
    """
    page = get_valid_page_or_1()
    try:
        per_page = int(request.args.get('per_page', current_app.config['DM_API_SERVICES_PAGE_SIZE']))
    except ValueError:
        abort(400, 'invalid page size supplied')

    earliest_for_each_object = convert_to_boolean(request.args.get('earliest_for_each_object'))

    if earliest_for_each_object:
        # the rest of the filters we add will be added against a subquery which we will join back onto the main table
        # to retrieve the rest of the row. this allows the potentially expensive DISTINCT ON pass to be performed
        # against an absolutely minimal subset of rows which can probably be pulled straight from an index
        audits = db.session.query(AuditEvent.id)
    else:
        audits = AuditEvent.query

    audit_date = request.args.get('audit-date', None)
    if audit_date:
        if not is_valid_date(audit_date):
            abort(400, 'invalid audit date supplied')
        # Filter to the 24-hour window starting at midnight of the given day.
        audit_datetime = datetime.strptime(audit_date, DATE_FORMAT)
        audits = audits.filter(
            AuditEvent.created_at.between(audit_datetime, audit_datetime + timedelta(days=1))
        )

    audit_type = request.args.get('audit-type')
    if audit_type:
        if not AuditTypes.is_valid_audit_type(audit_type):
            abort(400, "Invalid audit type")
        audits = audits.filter(
            AuditEvent.type == audit_type
        )

    user = request.args.get('user')
    if user:
        audits = audits.filter(
            AuditEvent.user == user
        )

    data_supplier_id = request.args.get('data-supplier-id')
    if data_supplier_id:
        #  This filter relies on index `idx_audit_events_data_supplier_id`. See `app..models.main` for its definition.
        audits = audits.filter(
            func.coalesce(
                AuditEvent.__table__.c.data['supplierId'].astext,
                AuditEvent.__table__.c.data['supplier_id'].astext,
            ) == data_supplier_id
        )

    acknowledged = request.args.get('acknowledged', None)
    if acknowledged and acknowledged != 'all':
        if not is_valid_acknowledged_state(acknowledged):
            abort(400, 'invalid acknowledged state supplied')
        # Convert once (the original converted twice with a redundant
        # `elif not ...` arm that was always true when reached).
        audits = audits.filter(
            AuditEvent.acknowledged == (true() if convert_to_boolean(acknowledged) else false())
        )

    object_type = request.args.get('object-type')
    object_id = request.args.get('object-id')
    if object_type:
        if object_type not in AUDIT_OBJECT_TYPES:
            abort(400, 'invalid object-type supplied')

        ref_model = AUDIT_OBJECT_TYPES[object_type]
        ext_id_field = AUDIT_OBJECT_ID_FIELDS[object_type]

        audits = audits.filter(AuditEvent.object.is_type(ref_model))

        # "object_id" here is the *external* object_id
        if object_id:
            ref_object = ref_model.query.filter(
                ext_id_field == object_id
            ).first()

            if ref_object is None:
                abort(404, "Object with given object-type and object-id doesn't exist")

            # this `.identity_key_from_instance(...)[1][0]` is exactly the method used by sqlalchemy_utils' generic
            # relationship code to extract an object's pk value, so *should* be relatively stable, API-wise.
            # the `[1]` is to select the pk's *value* rather than the `Column` object and the `[0]` simply fetches
            # the first of any pk values - generic relationships are already assuming that compound pks aren't in
            # use by the target.
            ref_object_pk = class_mapper(ref_model).identity_key_from_instance(ref_object)[1][0]
            audits = audits.filter(
                AuditEvent.object_id == ref_object_pk
            )
    elif object_id:
        abort(400, 'object-id cannot be provided without object-type')

    if earliest_for_each_object:
        if not (
            acknowledged and
            convert_to_boolean(acknowledged) is False and
            audit_type == "update_service" and
            object_type == "services"
        ):
            current_app.logger.warning(
                "earliest_for_each_object option currently intended for use on acknowledged update_service events. "
                "If use with any other events is to be regular, the scope of the corresponding partial index "
                "should be expanded to cover it."
            )
        # we need to join the built-up subquery back onto the AuditEvent table to retrieve the rest of the row
        audits_subquery = audits.order_by(
            AuditEvent.object_type,
            AuditEvent.object_id,
            AuditEvent.created_at,
            AuditEvent.id,
        ).distinct(
            AuditEvent.object_type,
            AuditEvent.object_id,
        ).subquery()

        audits = AuditEvent.query.join(audits_subquery, audits_subquery.c.id == AuditEvent.id)

    # created_at alone is not unique, so include id as a deterministic tie-breaker.
    sort_order = db.desc if convert_to_boolean(request.args.get('latest_first')) else db.asc
    audits = audits.order_by(sort_order(AuditEvent.created_at), sort_order(AuditEvent.id))

    return paginated_result_response(
        result_name=RESOURCE_NAME,
        results_query=audits,
        page=page,
        per_page=per_page,
        endpoint='.list_audits',
        request_args=request.args
    ), 200