Example #1
  def get(self):
    obj_count = \
      sa.sql.select([
        Tag.ns,
        functions.count(entity_tag_tbl.c.entity_id).label('obj_count'),
      ])\
            .select_from(Tag.__table__.join(entity_tag_tbl))\
            .group_by(Tag.ns)\
            .alias()

    ns_query = sa.sql.select(
      [Tag.ns,
       functions.count(Tag.id).label('tag_count'),
       obj_count.c.obj_count],
      from_obj=[Tag.__table__.outerjoin(obj_count, Tag.ns == obj_count.c.ns)]
    )\
                     .group_by(Tag.ns, obj_count.c.obj_count)\
                     .order_by(Tag.ns)

    session = current_app.db.session()
    namespaces = session.execute(ns_query)

    return render_template(
      'admin/tags.html',
      namespaces=namespaces,
    )
Example #2
def top_maps_by_times_played_q(cutoff_days, region = None, game_type_cd = None):
    """
    Query to retrieve the top maps by the number of times they were played
    during a date range.

    Games older than cutoff_days days are ignored.
    """
    # only games played during this range are considered
    right_now = datetime.utcnow()
    cutoff_dt = right_now - timedelta(days=cutoff_days)

    top_maps_q = DBSession.query(Game.map_id, Map.name,
            func.count()).\
            filter(Map.map_id==Game.map_id).\
            filter(expr.between(Game.create_dt, cutoff_dt, right_now)).\
            order_by(expr.desc(func.count())).\
            group_by(Game.map_id).\
            group_by(Map.name)

    if region and region != "" and region != "0":
      top_maps_q = top_maps_q.filter(Server.region==region).filter(Server.server_id==Game.server_id)
    if game_type_cd and game_type_cd != "":
      top_maps_q = top_maps_q.filter(Game.game_type_cd == game_type_cd)    

    return top_maps_q
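
The pattern above (an aggregate func.count() column, group_by, a between() date window, and conditionally appended filters) can be exercised on a toy schema. A minimal, self-contained sketch, assuming SQLAlchemy 1.4+ and an in-memory SQLite database; the Game and Map models here are stand-ins, not the project's real mapped classes:

from datetime import datetime, timedelta

from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, create_engine, func
from sqlalchemy.orm import declarative_base, sessionmaker
from sqlalchemy.sql import expression as expr

Base = declarative_base()

class Map(Base):
    __tablename__ = "maps"
    map_id = Column(Integer, primary_key=True)
    name = Column(String)

class Game(Base):
    __tablename__ = "games"
    game_id = Column(Integer, primary_key=True)
    map_id = Column(Integer, ForeignKey("maps.map_id"))
    create_dt = Column(DateTime)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Map(map_id=1, name="dance"))
session.add_all([Game(map_id=1, create_dt=datetime.utcnow()) for _ in range(3)])
session.commit()

# Same shape as top_maps_by_times_played_q: count games per map inside a date window.
right_now = datetime.utcnow()
cutoff_dt = right_now - timedelta(days=7)
top_maps_q = (session.query(Game.map_id, Map.name, func.count())
              .filter(Map.map_id == Game.map_id)
              .filter(expr.between(Game.create_dt, cutoff_dt, right_now))
              .group_by(Game.map_id, Map.name)
              .order_by(expr.desc(func.count())))
print(top_maps_q.all())  # [(1, 'dance', 3)]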
Example #3
    def top_maps(self):
        """Returns the raw data shared by all renderers."""
        try:
            top_maps_q = DBSession.query(
                fg.row_number().over(order_by=expr.desc(func.count())).label("rank"),
                Game.map_id, Map.name, func.count().label("times_played"))\
                .filter(Map.map_id == Game.map_id)\
                .filter(Game.server_id == self.server_id)\
                .filter(Game.create_dt > (self.now - timedelta(days=self.lifetime)))\
                .group_by(Game.map_id)\
                .group_by(Map.name) \
                .order_by(expr.desc(func.count()))

            if self.last:
                top_maps_q = top_maps_q.offset(self.last)

            if self.limit:
                top_maps_q = top_maps_q.limit(self.limit)

            top_maps = top_maps_q.all()
        except Exception as e:
            log.debug(e)
            raise HTTPNotFound

        return top_maps
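
The piece worth isolating is the ranking column: row_number() windowed over the same descending count() used for ordering, so each grouped row carries its rank. A hedged sketch, assuming SQLAlchemy 1.4+; the games table is a stand-in and the statement is only rendered, not executed:

from sqlalchemy import Column, Integer, MetaData, Table, func, select
from sqlalchemy.sql import expression as expr

games = Table("games", MetaData(), Column("map_id", Integer))

# rank each map by how often it appears, reusing the same count(*) for ordering
rank = func.row_number().over(order_by=expr.desc(func.count())).label("rank")
stmt = (select(games.c.map_id, rank, func.count().label("times_played"))
        .group_by(games.c.map_id)
        .order_by(expr.desc(func.count())))
print(stmt)
# Renders roughly as:
#   SELECT games.map_id, row_number() OVER (ORDER BY count(*) DESC) AS rank,
#          count(*) AS times_played
#   FROM games GROUP BY games.map_id ORDER BY count(*) DESC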
Example #4
 def execute(self, message, user, params):
     
     tick = Updates.current_tick() + int(params.group(1) or 1)
     replies = []
     
     Q = session.query(Galaxy.x, Galaxy.y, count())
     Q = Q.join(Target.planet)
     Q = Q.join(Planet.galaxy)
     Q = Q.filter(Planet.active == True)
     Q = Q.filter(Target.tick >= tick)
     Q = Q.group_by(Galaxy.x, Galaxy.y)
     result = Q.all()
     prev = []
     for x, y, bitches in result:
         prev.append("%s:%s(%s)"%(x,y,bitches))
     replies.append("Active bookings: " + ", ".join(prev))
     
     Q = session.query(Alliance.name, count())
     Q = Q.join(Target.planet)
     Q = Q.outerjoin(Planet.intel)
     Q = Q.outerjoin(Intel.alliance)
     Q = Q.filter(Planet.active == True)
     Q = Q.filter(Target.tick >= tick)
     Q = Q.group_by(Alliance.name)
     result = Q.all()
     prev = []
     for name, bitches in result:
         prev.append("%s (%s)"%(name or "Unknown", bitches))
     replies.append("Active bitches: " + ", ".join(prev))
     
     if len(replies) < 1:
         replies.append("No active bookings. This makes %s sad. Please don't make %s sad." %((Config.get("Connection","nick"),)*2))
     message.reply("\n".join(replies))
Example #5
    def fleet_overview(self):
        if self.scantype not in ("J",):
            return

        from sqlalchemy.sql.functions import min, sum

        f = aliased(FleetScan)
        a = aliased(FleetScan)
        d = aliased(FleetScan)

        Q = session.query(
            f.landing_tick,
            f.landing_tick - min(Scan.tick),
            count(a.id),
            coalesce(sum(a.fleet_size), 0),
            count(d.id),
            coalesce(sum(d.fleet_size), 0),
        )
        Q = Q.join(f.scan)
        Q = Q.filter(f.scan == self)

        Q = Q.outerjoin((a, and_(a.id == f.id, a.mission.ilike("Attack"))))
        Q = Q.outerjoin((d, and_(d.id == f.id, d.mission.ilike("Defend"))))

        Q = Q.group_by(f.landing_tick)
        Q = Q.order_by(asc(f.landing_tick))
        return Q.all()
Example #6
    def get_counts_for_query(self, q):
        # HACKITY HACK
        entities = [
            x.entity_zero.entity for x in q._entities]
        entities = {e.__mapper__.tables[0].name: e for e in entities}
        content_entity = entities['content']

        post = with_polymorphic(
            Post, [], Post.__table__,
            aliased=False, flat=True)
        q = q.join(
            post, (content_entity.id == post.id) &
                  (post.publication_state.in_(countable_publication_states)))

        if self.user_id:
            action_entity = entities['action']
            return q.with_entities(
                count(content_entity.id),
                count(post.creator_id.distinct()),
                count(action_entity.id)).first()
        else:
            (post_count, contributor_count) = q.with_entities(
                count(content_entity.id),
                count(post.creator_id.distinct())).first()
            return (post_count, contributor_count, 0)
Example #7
 def execute(self, request, user, x, y, z, h=False, hs=False, ticks=None):
     planet = Planet.load(x,y,z)
     if planet is None:
         return HttpResponseRedirect(reverse("planet_ranks"))
     
     ticks = int(ticks or 0) if (h or hs) else 12
     
     if not hs:
         sizediffvalue = PlanetHistory.rdiff * PA.getint("numbers", "roid_value")
         valuediffwsizevalue = PlanetHistory.vdiff - sizediffvalue
         resvalue = valuediffwsizevalue * PA.getint("numbers", "res_value")
         shipvalue = valuediffwsizevalue * PA.getint("numbers", "ship_value")
         xpvalue = PlanetHistory.xdiff * PA.getint("numbers", "xp_value")
         Q = session.query(PlanetHistory,
                             sizediffvalue,
                             valuediffwsizevalue,
                             resvalue, shipvalue,
                             xpvalue,
                             )
         Q = Q.filter(PlanetHistory.current == planet)
         Q = Q.order_by(desc(PlanetHistory.tick))
         history = Q[:ticks] if ticks else Q.all()
     else:
         history = None
     
     if not (h or hs):
         landings = session.query(PlanetLandings.hour, count()).filter(PlanetLandings.planet==planet).group_by(PlanetLandings.hour).all()
         landed = session.query(PlanetLandedOn.hour, count()).filter(PlanetLandedOn.planet==planet).group_by(PlanetLandedOn.hour).all()
         vdrops = session.query(PlanetValueDrops.hour, count()).filter(PlanetValueDrops.planet==planet).group_by(PlanetValueDrops.hour).all()
         idles = session.query(PlanetIdles.hour, count()).filter(PlanetIdles.planet==planet).group_by(PlanetIdles.hour).all()
         hourstats = {
                         'landings' : dict(landings), 'landingsT' : sum([c for hour,c in landings]),
                         'landed'   : dict(landed),   'landedT'   : sum([c for hour,c in landed]),
                         'vdrops'   : dict(vdrops),   'vdropsT'   : sum([c for hour,c in vdrops]),
                         'idles'    : dict(idles),    'idlesT'    : sum([c for hour,c in idles]),
                         }
     else:
         hourstats = None
     
     if not h:
         Q = session.query(PlanetHistory)
         Q = Q.filter(or_(PlanetHistory.hour == 23, PlanetHistory.tick == Updates.current_tick()))
         Q = Q.filter(PlanetHistory.current == planet)
         Q = Q.order_by(desc(PlanetHistory.tick))
         hsummary = Q.all() if hs else Q[:14]
     else:
         hsummary = None
     
     return render(["planet.tpl",["hplanet.tpl","hsplanet.tpl"][hs]][h or hs],
                     request,
                     planet = planet,
                     history = history,
                     hour = datetime.utcnow().hour, hourstats = hourstats,
                     hsummary = hsummary,
                     ticks = ticks,
                   )
Example #8
File: main.py Project: z/XonStat
def main_index(request):
    leaderboard_count = 10
    recent_games_count = 32

    # top players by score
    top_players = DBSession.query(Player.player_id, Player.nick, 
            func.sum(PlayerGameStat.score)).\
            filter(Player.player_id == PlayerGameStat.player_id).\
            filter(Player.player_id > 2).\
            order_by(expr.desc(func.sum(PlayerGameStat.score))).\
            group_by(Player.nick).\
            group_by(Player.player_id).all()[0:10]

    top_players = [(player_id, html_colors(nick), score) \
            for (player_id, nick, score) in top_players]

    for i in range(leaderboard_count-len(top_players)):
        top_players.append(('-', '-', '-'))

    # top servers by number of total players played
    top_servers = DBSession.query(Server.server_id, Server.name, 
            func.count()).\
            filter(Game.server_id==Server.server_id).\
            order_by(expr.desc(func.count(Game.game_id))).\
            group_by(Server.server_id).\
            group_by(Server.name).all()[0:10]

    for i in range(leaderboard_count-len(top_servers)):
        top_servers.append(('-', '-', '-'))

    # top maps by total times played
    top_maps = DBSession.query(Map.map_id, Map.name, 
            func.count(Game.game_id)).\
            filter(Map.map_id==Game.map_id).\
            order_by(expr.desc(func.count(Game.game_id))).\
            group_by(Map.map_id).\
            group_by(Map.name).all()[0:10]

    for i in range(leaderboard_count-len(top_maps)):
        top_maps.append(('-', '-', '-'))

    recent_games = DBSession.query(Game, Server, Map).\
            filter(Game.server_id==Server.server_id).\
            filter(Game.map_id==Map.map_id).\
            order_by(expr.desc(Game.start_dt)).all()[0:recent_games_count]

    for i in range(recent_games_count-len(recent_games)):
        recent_games.append(('-', '-', '-'))

    return {'top_players':top_players,
            'top_servers':top_servers,
            'top_maps':top_maps,
            'recent_games':recent_games,
            }
Example #9
    def getAlarmCount(days=0):
        """
        Get number of alarms, grouped by state

        :param optional days: 0 for all alarms, otherwise only alarms from the last `days` days
        :return: list grouped by state
        """
        if days != 0:
            return db.get(Alarm.state, count(Alarm.id)).filter(Alarm.timestamp > (datetime.datetime.now() - datetime.timedelta(days=days))).order_by(Alarm.timestamp.desc()).group_by(Alarm.state).all()
        else:
            return db.get(Alarm.state, count(Alarm.id)).group_by(Alarm.state).all()
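
The same grouped count() appears here with an optional time window. A minimal sketch of that conditional filter, assuming SQLAlchemy 1.4+, an in-memory SQLite database, and a stand-in Alarm model:

import datetime

from sqlalchemy import Column, DateTime, Integer, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker
from sqlalchemy.sql.functions import count

Base = declarative_base()

class Alarm(Base):
    __tablename__ = "alarms"
    id = Column(Integer, primary_key=True)
    state = Column(Integer)
    timestamp = Column(DateTime)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Alarm(state=s, timestamp=datetime.datetime.now()) for s in (0, 0, 1)])
session.commit()

def alarm_count(days=0):
    # build the grouped count once, then narrow it only when a window is given
    q = session.query(Alarm.state, count(Alarm.id)).group_by(Alarm.state)
    if days != 0:
        q = q.filter(Alarm.timestamp > datetime.datetime.now() - datetime.timedelta(days=days))
    return q.all()

print(alarm_count())   # e.g. [(0, 2), (1, 1)]
print(alarm_count(7))  # same rows here, since every alarm is recent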
Example #10
 def execute(self, request, user, name):
     alliance = Alliance.load(name)
     if alliance is None:
         return HttpResponseRedirect(reverse("alliance_ranks"))
     
     ph = aliased(PlanetHistory)
     members = count().label("members")
     size = sum(ph.size).label("size")
     value = sum(ph.value).label("value")
     score = sum(ph.score).label("score")
     avg_size = size.op("/")(members).label("avg_size")
     avg_value = value.op("/")(members).label("avg_value")
     t10v = count(case(whens=((ph.value_rank <= 10 ,1),), else_=None)).label("t10v")
     t100v = count(case(whens=((ph.value_rank <= 100 ,1),), else_=None)).label("t100v")
     
     pho = aliased(PlanetHistory)
     sizeo = sum(pho.size).label("sizeo")
     valueo = sum(pho.value).label("valueo")
     scoreo = sum(pho.score).label("scoreo")
     
     Q = session.query(PlanetHistory.tick.label("tick"),
                       Alliance.id.label("id"),
                       literal_column("rank() OVER (PARTITION BY planet_history.tick ORDER BY sum(planet_history.size) DESC)").label("size_rank"),
                       literal_column("rank() OVER (PARTITION BY planet_history.tick ORDER BY sum(planet_history.value) DESC)").label("value_rank"),
                       )
     Q = Q.filter(PlanetHistory.active == True)
     Q = Q.join(PlanetHistory.current)
     Q = Q.join(Planet.intel)
     Q = Q.join(Intel.alliance)
     Q = Q.group_by(PlanetHistory.tick, Alliance.id)
     ranks = Q.subquery()
     
     Q = session.query(ph.tick, members,
                       size, value,
                       avg_size, avg_value,
                       size-sizeo, value-valueo, score-scoreo,
                       t10v, t100v,
                       )
     Q = Q.filter(ph.active == True)
     Q = Q.join(ph.current)
     Q = Q.join(Planet.intel)
     Q = Q.join(Intel.alliance)
     Q = Q.outerjoin((pho, and_(ph.id==pho.id, ph.tick-1==pho.tick),))
     Q = Q.filter(Intel.alliance == alliance)
     Q = Q.group_by(ph.tick)
     
     Q = Q.from_self().add_columns(ranks.c.size_rank, ranks.c.value_rank)
     Q = Q.outerjoin((ranks, and_(ph.tick == ranks.c.tick, alliance.id == ranks.c.id),))
     Q = Q.order_by(desc(ph.tick))
     
     history = Q.all()
     
     return render("ialliancehistory.tpl", request, alliance=alliance, members=alliance.intel_members, history=history)
Example #11
def upgrade(pyramid_env):
    from assembl.models import IdeaLink, get_session_maker
    db = get_session_maker()()
    # First, reorder live links.
    with transaction.manager:
        ids = db.query(IdeaLink.source_id)\
            .filter(IdeaLink.tombstone_date == None)\
            .group_by(IdeaLink.source_id, IdeaLink.order)\
            .having((count(IdeaLink.id) > 1)).all()
        for (source_id,) in ids:
            links = db.query(IdeaLink).filter_by(
                source_id=source_id,
                tombstone_date=None).order_by(
                IdeaLink.order, IdeaLink.id).all()
            for n, link in enumerate(links):
                link.order = n + 1
        # Then dead links
        q = db.query(
            IdeaLink.source_id, IdeaLink.tombstone_date).\
            group_by(IdeaLink.source_id, IdeaLink.order,
                     IdeaLink.tombstone_date).\
            having((count(IdeaLink.id) > 1)).all()
        for (source_id, date) in q:
            if not date:
                continue
            dest_links = db.query(IdeaLink).filter_by(
                source_id=source_id,
                tombstone_date=date).all()
            # Try to find the order of the ordered link the closest
            # in date to each current link.
            all_links = db.query(IdeaLink).filter_by(source_id=source_id).all()
            by_base_id = defaultdict(list)
            for link in all_links:
                by_base_id[link.base_id].append(link)
            signatures = {}
            for dest in dest_links:
                base_id = dest.base_id
                links = by_base_id[base_id]
                # source_id should be the same.
                links = [l for l in links if l.order]

                def distance(l):
                    if l == dest:
                        return -1
                    if not l.tombstone_date:
                        return 0
                    return abs((l.tombstone_date-dest.tombstone_date).seconds)
                links.sort(key=distance)
                signatures[base_id] = tuple((l.order for l in links))
            dest_links.sort(key=lambda l: signatures[l.base_id])
            for n, link in enumerate(dest_links):
                link.order = n
Example #12
def calculateDomainAffinities():
    DeityDomain2 = aliased(DeityDomain)
    Domain2 = aliased(Domain)
    q_affinity = session.query(Domain.name, Domain2.name, count()).\
        join(DeityDomain).\
        join(Deity).\
        join(DeityDomain2).\
        join(Domain2).\
        filter(Domain.id != Domain2.id).\
        order_by(Domain.name, count().desc(), Domain2.name).\
        group_by(Domain.name, Domain2.name)

    return q_affinity.all()
Example #13
File: info.py Project: JDD/merlin
 def execute(self, message, user, params):
     
     tag_count = PA.getint("numbers", "tag_count")
     
     alliance = Alliance.load(params.group(1))
     if alliance is None:
         message.reply("No alliance matching '%s' found"%(params.group(1),))
         return
     
     Q = session.query(sum(Planet.value), sum(Planet.score),
                       sum(Planet.size), sum(Planet.xp),
                       count())
     Q = Q.join(Planet.intel)
     Q = Q.filter(Planet.active == True)
     Q = Q.filter(Intel.alliance==alliance)
     Q = Q.group_by(Intel.alliance_id)
     result = Q.first()
     if result is None:
         message.reply("No planets in intel match alliance %s"%(alliance.name,))
         return
     
     value, score, size, xp, members = result
     if members <= tag_count:
         reply="%s Members: %s/%s, Value: %s, Avg: %s," % (alliance.name,members,alliance.members,value,value//members)
         reply+=" Score: %s, Avg: %s," % (score,score//members) 
         reply+=" Size: %s, Avg: %s, XP: %s, Avg: %s" % (size,size//members,xp,xp//members)
         message.reply(reply)
         return
     
     Q = session.query(Planet.value, Planet.score, 
                       Planet.size, Planet.xp, 
                       Intel.alliance_id)
     Q = Q.join(Planet.intel)
     Q = Q.filter(Planet.active == True)
     Q = Q.filter(Intel.alliance==alliance)
     Q = Q.order_by(desc(Planet.score))
     Q = Q.limit(tag_count)
     Q = Q.from_self(sum(Planet.value), sum(Planet.score),
                     sum(Planet.size), sum(Planet.xp),
                     count())
     Q = Q.group_by(Intel.alliance_id)
     ts_result = Q.first()
     
     ts_value, ts_score, ts_size, ts_xp, ts_members = ts_result
     reply="%s Members: %s/%s (%s)" % (alliance.name,members,alliance.members,ts_members)
     reply+=", Value: %s (%s), Avg: %s (%s)" % (value,ts_value,value//members,ts_value//ts_members)
     reply+=", Score: %s (%s), Avg: %s (%s)" % (score,ts_score,score//members,ts_score//ts_members)
     reply+=", Size: %s (%s), Avg: %s (%s)" % (size,ts_size,size//members,ts_size//ts_members)
     reply+=", XP: %s (%s), Avg: %s (%s)" % (xp,ts_xp,xp//members,ts_xp//ts_members)
     message.reply(reply)
Example #14
def calculateDomainAffinitiesForSetting(setting):
    DeityDomain2 = aliased(DeityDomain)
    Domain2 = aliased(Domain)
    q_settingaffinity = session.query(Domain.name, Domain2.name, count()).\
        join(DeityDomain).\
        join(Deity).\
        join(DeityDomain2).\
        join(Domain2).\
        join(DeitySetting).\
        filter(Domain.id != Domain2.id, DeitySetting.setting == setting).\
        order_by(Domain.name, count().desc(), Domain2.name).\
        group_by(Domain.name, Domain2.name)

    return q_settingaffinity.all()
Example #15
 def get_counts_for_query(self, q):
     if self.user_id:
         # HACKITY HACK
         (content_entity, action_entity) = [
             x.entity_zero.entity for x in q._entities]
         return q.with_entities(
             count(content_entity.id), count(action_entity.id)).first()
     else:
         (content_entity,) = [
             x.entity_zero.entity for x in q._entities]
         (post_count,) = q.with_entities(
             count(content_entity.id)).first()
         return (post_count, 0)
Example #16
    def execute(self, message, user, params):

        alliance = Alliance.load(params.group(1))
        if alliance is None:
            message.reply("No alliance matching '%s' found" % (params.group(1),))
            return
        if params.group(2):
            alliance2 = Alliance.load(params.group(2))
            if alliance2 is None:
                message.reply("No alliance matching '%s' found" % (params.group(2),))
                return
        bums = int(params.group(3) or 2)
        Q = session.query(Galaxy.x, Galaxy.y, count())
        Q = Q.join(Galaxy.planets)
        Q = Q.join(Planet.intel)
        Q = Q.filter(Galaxy.active == True)
        Q = Q.filter(Planet.active == True)
        if params.group(2):
            R = Q.filter(Intel.alliance == alliance2)
            R = R.group_by(Galaxy.x, Galaxy.y)
            R = R.having(count() >= bums)
        Q = Q.filter(Intel.alliance == alliance)
        Q = Q.group_by(Galaxy.x, Galaxy.y)
        Q = Q.having(count() >= bums)
        prev = []
        if params.group(2):
            for x1, y1, c1 in Q.all():
                for x2, y2, c2 in R.all():
                    if x1 == x2 and y1 == y2:
                        prev.append("%s:%s (%s,%s)" % (x1, y1, c1, c2))
            if len(prev) < 1:
                message.reply(
                    "No galaxies with at least %s bumchums from %s and %s" % (bums, alliance.name, alliance2.name)
                )
                return
            reply = "Galaxies with at least %s bums from %s and %s: " % (
                bums,
                alliance.name,
                alliance2.name,
            ) + " | ".join(prev)
        else:
            result = Q.all()
            if len(result) < 1:
                message.reply("No galaxies with at least %s bumchums from %s" % (bums, alliance.name))
                return
            prev = []
            for x, y, chums in result:
                prev.append("%s:%s (%s)" % (x, y, chums))
            reply = "Galaxies with at least %s bums from %s: " % (bums, alliance.name) + " | ".join(prev)
        message.reply(reply)
Example #17
    def list_planets(self, message, user, params):
        oIntel = aliased(Intel)
        tIntel = aliased(Intel)
        
        # Find all planets with unknown alliance, who have been defended by planets (outside of their galaxy) with known alliance
        TQ = session.query(Planet.x, Planet.y, Planet.z, Alliance.name, count()).select_from(FleetScan).filter(FleetScan.in_galaxy==False, FleetScan.mission=="Defend")
        TQ = TQ.join(oIntel, FleetScan.owner_id == oIntel.planet_id).join(tIntel, FleetScan.target_id == tIntel.planet_id)
        TQ = TQ.filter(tIntel.alliance_id == None).filter(oIntel.alliance_id != None)
        TQ = TQ.join(Alliance, oIntel.alliance_id == Alliance.id).join(Planet, FleetScan.target_id == Planet.id)
        TQ = TQ.group_by(Planet.x, Planet.y, Planet.z, Alliance.name)

        # Find all planets with unknown alliance, who have defended planets (outside of their galaxy) with known alliance
        OQ = session.query(Planet.x, Planet.y, Planet.z, Alliance.name, count()).select_from(FleetScan).filter(FleetScan.in_galaxy==False, FleetScan.mission=="Defend")
        OQ = OQ.join(oIntel, FleetScan.owner_id == oIntel.planet_id).join(tIntel, FleetScan.target_id == tIntel.planet_id)
        OQ = OQ.filter(tIntel.alliance_id != None).filter(oIntel.alliance_id == None)
        OQ = OQ.join(Alliance, tIntel.alliance_id == Alliance.id).join(Planet, FleetScan.owner_id == Planet.id)
        OQ = OQ.group_by(Planet.x, Planet.y, Planet.z, Alliance.name)

        # A FULL OUTER JOIN would fit nicely here, but SQLAlchemy doesn't support it and I'm trying to stick with ORM, so we'll use Python

        # Combine the results into one sorted list
        results = sorted(TQ.all()+OQ.all())

        # Quit now if there are no results
        if len(results) == 0:
            message.reply("No suggestions found")
            return

        i = 0
        while i < (len(results)-1):
          # Check for planet/alliance combinations that appeared in both lists
          if results[i][:4] == results[i+1][:4]:
            r = list(results.pop(i))
            # Add the fleet counts (r[i+1] has moved to r[i])
            r[4] += results.pop(i)[4]
            results.insert(i, r)
          i+=1

        # Sort by number of fleets using a helper function
        from operator import itemgetter
        results.sort(key=itemgetter(4), reverse=True)

        # Reply to the user
        message.reply("Coords     Suggestion      Fleets")
        limit = int(params.group(1) or 5)
        for r in results[:limit]:
            message.reply("%-9s  %-14s  %s" % ("%s:%s:%s" % (r[0], r[1], r[2]), r[3], r[4]))
        if len(results) > limit:
            message.reply("%s results not shown (%s total)" % (len(results)-limit, len(results)))
Example #18
def dashboard(request):
    dbsession = DBSession()

    jobs = Job.get_last()
    running_jobs = Job.get_running()
    upcoming_jobs = Job.get_upcoming()

    # statistics
    num_clients = dbsession.query(count(Client.clientid)).scalar()
    num_jobs = dbsession.query(count(Job.jobid)).scalar()
    num_volumes = dbsession.query(count(Media.mediaid)).scalar()
    sum_volumes = Media.format_byte_size(dbsession.query(sum(Media.volbytes)).scalar() or 0)
    database_size = get_database_size(DBSession.bind)

    return locals()
Example #19
    def __init__(self, *args, **kwargs):
        super(EmailAlertForm, self).__init__(*args, **kwargs)
        committee_list = Committee\
                         .query\
                         .order_by(Committee.house_id.desc())\
                         .order_by(Committee.name)\
                         .filter_by(monitored=True)\
                         .all()

        # count of daily schedule subscribers
        subs = User.query.filter(User.subscribe_daily_schedule == True, User.confirmed_at != None).count()  # noqa
        self.daily_schedule_subscribers.label.text += " (%d)" % subs

        # count subscribers for committees
        subscriber_counts = {t[0]: t[1]
                for t in db.session\
                    .query(user_committee_alerts.c.committee_id,
                           count(1))\
                    .join(User, User.id == user_committee_alerts.c.user_id)\
                    .filter(User.confirmed_at != None)\
                    .group_by(user_committee_alerts.c.committee_id)\
                    .all()}

        self.committee_ids.choices = [(c.id, "%s - %s (%d)" % (c.house.name, c.name, subscriber_counts.get(c.id, 0))) for c in committee_list]

        self.message = None
        self.ad_hoc_mapper = []
        for committee in committee_list:
            if committee.ad_hoc:
                self.ad_hoc_mapper.append(committee.id)
Example #20
    def get(self):
        email = self.get_email()

        # Shorter variable names
        auth_user = auth_user_table
        survey = survey_table
        submission = submission_table
        submission_id = submission_table.c.submission_id
        submission_time = submission_table.c.submission_time

        result = self._get_records(
            table=auth_user.join(survey).outerjoin(submission),
            email=email,
            selected=[
                survey_table.c.survey_title,
                count(submission_id).label('num_submissions'),
                # survey_table.c.created_on,
                sqlmax(submission_time).label('latest_submission'),
                survey_table.c.survey_id,
            ],
            text_filter_column=survey_table.c.survey_title,
            default_sort_column_name='latest_submission',
            total_records=get_number_of_surveys(self.db, email)
        )

        self.write(result)
Example #21
def _base_query(table: Table,
                email: str,
                selected: list,
                where: BinaryExpression=None) -> Select:
    """
    Return a query for a DataTable without any text filtering, ordering,
    or limiting applied.

    :param table: the SQLAlchemy table. Should be one or more tables joined
                  with the auth_user table
    :param email: the user's e-mail address
    :param selected: the columns to select from the table
    :param where: an optional WHERE clause to apply to the query
    :return: the query object
    """
    # Selected columns (but not aggregate functions) must also appear in the
    # GROUP BY clause
    grouped = (column for column in selected if type(column) is Column)

    query = select(
        # The extra column is for the DataTable recordsFiltered attribute.
        # It represents the number of records found before applying a sql LIMIT
        selected + [count().over().label('filtered')]
    ).select_from(
        table
    ).group_by(
        *grouped
    ).where(
        auth_user_table.c.email == email
    )
    if where is not None:
        query = query.where(where)
    return query
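
The trick here is count().over() with no partition or ordering: the window counts every row that survives the WHERE clause, so each returned row carries the pre-LIMIT total that DataTables expects as recordsFiltered. A hedged sketch, assuming SQLAlchemy 1.4+ and an in-memory SQLite new enough (3.25+) for window functions; the surveys table is a stand-in:

from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, select
from sqlalchemy.sql.functions import count

metadata = MetaData()
surveys = Table("surveys", metadata,
                Column("id", Integer, primary_key=True),
                Column("title", String))

engine = create_engine("sqlite://")
metadata.create_all(engine)
with engine.begin() as conn:
    conn.execute(surveys.insert(), [{"title": t} for t in ("a", "b", "c")])
    # the window count is computed before LIMIT, so the total survives pagination
    stmt = (select(surveys.c.title, count().over().label("filtered"))
            .order_by(surveys.c.title)
            .limit(2))
    print(conn.execute(stmt).fetchall())
# [('a', 3), ('b', 3)]  -- only 2 rows returned, but 'filtered' still says 3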
Example #22
    def set_jcmt_options(self, proposal_id, target_of_opp, daytime,
                         time_specific, polarimetry):
        """
        Set the JCMT proposal options for a given proposal.
        """

        values = {
            jcmt_options.c.target_of_opp: target_of_opp,
            jcmt_options.c.daytime: daytime,
            jcmt_options.c.time_specific: time_specific,
            jcmt_options.c.polarimetry: polarimetry,
        }

        with self._transaction() as conn:
            if 0 < conn.execute(select(
                    [count(jcmt_options.c.proposal_id)]).where(
                        jcmt_options.c.proposal_id == proposal_id)).scalar():
                # Update existing options.
                result = conn.execute(jcmt_options.update().where(
                    jcmt_options.c.proposal_id == proposal_id
                ).values(values))

                if result.rowcount != 1:
                    raise ConsistencyError(
                        'no rows matched updating JCMT options')

            else:
                # Add new options record.
                values.update({
                    jcmt_options.c.proposal_id: proposal_id,
                })

                conn.execute(jcmt_options.insert().values(values))
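
The write path is a common check-then-write upsert: count the matching rows inside the transaction, UPDATE if one exists, INSERT otherwise. A minimal Core sketch of that flow, assuming SQLAlchemy 1.4+ and an in-memory SQLite; the options table is a stand-in, and production code might prefer a dialect-specific INSERT ... ON CONFLICT instead:

from sqlalchemy import Boolean, Column, Integer, MetaData, Table, create_engine, select
from sqlalchemy.sql.functions import count

metadata = MetaData()
options = Table("options", metadata,
                Column("proposal_id", Integer, primary_key=True),
                Column("daytime", Boolean))

engine = create_engine("sqlite://")
metadata.create_all(engine)

def set_options(proposal_id, daytime):
    values = {"daytime": daytime}
    with engine.begin() as conn:
        # does a row for this proposal already exist?
        existing = conn.execute(
            select(count(options.c.proposal_id))
            .where(options.c.proposal_id == proposal_id)).scalar()
        if existing > 0:
            conn.execute(options.update()
                         .where(options.c.proposal_id == proposal_id)
                         .values(values))
        else:
            conn.execute(options.insert()
                         .values(dict(values, proposal_id=proposal_id)))

set_options(1, True)   # inserts
set_options(1, False)  # updates the same row
with engine.connect() as conn:
    print(conn.execute(select(options)).fetchall())  # [(1, False)]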
Example #23
 def execute(self, message, user, params):
     
     alliance = Alliance.load(params.group(1))
     if alliance is None:
         message.reply("No alliance matching '%s' found"%(params.group(1),))
         return
     
     Q = session.query(sum(Planet.value), sum(Planet.score),
                       sum(Planet.size), sum(Planet.xp),
                       count(), Planet.race)
     Q = Q.join(Planet.intel)
     Q = Q.filter(Planet.active == True)
     Q = Q.filter(Intel.alliance==alliance)
     Q = Q.group_by(Intel.alliance_id, Planet.race)
     Q = Q.order_by(asc(Planet.race))
     result = Q.all()
     if len(result) < 1:
         message.reply("No planets in intel match alliance %s"%(alliance.name,))
         return
     prev=[]
     for value, score, size, xp, members, race in result:
         reply="%s %s Val(%s)" % (members,race,self.num2short(value/members),)
         reply+=" Score(%s)" % (self.num2short(score/members),)
         reply+=" Size(%s) XP(%s)" % (size/members,self.num2short(xp/members),)
         prev.append(reply)
     reply="Demographics for %s: "%(alliance.name,)+ ' | '.join(prev)
     message.reply(reply)
Example #24
    def get_synthesis_contributors(self, id_only=True):
        # author of important extracts
        from .idea_content_link import Extract
        from .auth import AgentProfile
        from .post import Post
        from sqlalchemy.sql.functions import count
        local_uri = AssemblQuadStorageManager.local_uri()
        discussion_storage = \
            AssemblQuadStorageManager.discussion_storage_name()

        idea_uri = URIRef(self.uri(local_uri))
        clause = '''select distinct ?annotation where {
            %s idea:includes* ?ideaP .
            ?annotation assembl:resourceExpressesIdea ?ideaP }'''
        extract_ids = [x for (x,) in self.db.execute(
            SparqlClause(clause % (
                idea_uri.n3(),),
                quad_storage=discussion_storage.n3()))]
        r = list(self.db.query(AgentProfile.id, count(Extract.id)).join(
            Post, Post.creator_id==AgentProfile.id).join(Extract).filter(
            Extract.important == True, Extract.id.in_(extract_ids)))
        r.sort(key=lambda x: x[1], reverse=True)
        if id_only:
            return [AgentProfile.uri_generic(a) for (a, ce) in r]
        else:
            ids = [a for (a, ce) in r]
            order = {id: order for (order, id) in enumerate(ids)}
            agents = self.db.query(AgentProfile).filter(AgentProfile.id.in_(ids)).all()
            agents.sort(key=lambda a: order[a.id])
            return agents
Example #25
def get_recent_weapons(player_id):
    """
    Returns the weapons that have been used in the past 90 days
    and also used in 5 games or more.
    """
    cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=90)
    recent_weapons = []
    for weapon in DBSession.query(PlayerWeaponStat.weapon_cd, func.count()).\
            filter(PlayerWeaponStat.player_id == player_id).\
            filter(PlayerWeaponStat.create_dt > cutoff).\
            group_by(PlayerWeaponStat.weapon_cd).\
            having(func.count() > 4).\
            all():
                recent_weapons.append(weapon[0])

    return recent_weapons
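
The "used in 5 games or more" condition lives in having(func.count() > 4): HAVING filters groups after aggregation, where a plain WHERE cannot see the count yet. A short sketch, assuming SQLAlchemy 1.4+, an in-memory SQLite database, and a stand-in WeaponStat model:

from sqlalchemy import Column, Integer, String, create_engine, func
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class WeaponStat(Base):
    __tablename__ = "weapon_stats"
    id = Column(Integer, primary_key=True)
    weapon_cd = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([WeaponStat(weapon_cd=w) for w in ["vortex"] * 5 + ["mortar"] * 2])
session.commit()

# WHERE (none here) narrows rows first; HAVING then keeps only big enough groups
frequent = (session.query(WeaponStat.weapon_cd, func.count())
            .group_by(WeaponStat.weapon_cd)
            .having(func.count() > 4)
            .all())
print(frequent)  # [('vortex', 5)] -- 'mortar' is dropped by HAVING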
Example #26
    def mostFrequentVariableAndValue(self, variableNameList):
        """
    :type variableNameList: list(str)
"""

        subQueryList = []
        
        if len(variableNameList) == 0:
            raise EmptyVariableNameListError()

        with closing(self._sessionMaker()) as session:
            # For each variable, retrieve all possible values and their occurrence count.
            for variableName in variableNameList:
                variableNameColumn = literal(variableName).label(self._VARIABLE_NAME_KEY)
                variableValueColumn = getattr(SQLModsecurityAuditEntryMessage, variableName).label(self._VARIABLE_VALUE_KEY)
                variableValueCountColumn = count().label(self._VARIABLE_VALUE_COUNT_KEY)
                
                # Subquery of each variable.
                subQuery = self._makeQuery(session, [variableNameColumn, variableValueColumn, variableValueCountColumn])
                subQuery = subQuery.group_by(self._VARIABLE_NAME_KEY, self._VARIABLE_VALUE_KEY) 
                subQueryList.append(subQuery)
    
            # Merging all subqueries and sorting by reverse count...
            query = union(*subQueryList).order_by(desc(self._VARIABLE_VALUE_COUNT_KEY)).limit(1)
            
            # ... then picking the first one.
            item = session.execute(query).fetchone()
            
            if item is not None:
                return {str(item.variableName): item.variableValue}
            else:
                return None
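
Each subquery tags its rows with a literal() variable name, the variable's value, and a count; union() stacks them so a single ORDER BY ... LIMIT 1 picks the most frequent (variable, value) pair across all variables. A reduced sketch of that shape, assuming SQLAlchemy 1.4+ and an in-memory SQLite; the messages table and its columns are stand-ins:

from sqlalchemy import (Column, Integer, MetaData, String, Table, create_engine,
                        desc, literal, select, union)
from sqlalchemy.sql.functions import count

metadata = MetaData()
messages = Table("messages", metadata,
                 Column("id", Integer, primary_key=True),
                 Column("host", String),
                 Column("path", String))

engine = create_engine("sqlite://")
metadata.create_all(engine)
with engine.begin() as conn:
    conn.execute(messages.insert(), [
        {"host": "a.example", "path": "/login"},
        {"host": "a.example", "path": "/"},
        {"host": "b.example", "path": "/login"},
    ])
    # one grouped count() select per column, tagged with the column's name
    subqueries = [
        select(literal(name).label("variable"),
               col.label("value"),
               count().label("hits")).group_by(col)
        for name, col in [("host", messages.c.host), ("path", messages.c.path)]
    ]
    top = union(*subqueries).order_by(desc("hits")).limit(1)
    print(conn.execute(top).fetchone())
# ('host', 'a.example', 2) or ('path', '/login', 2) -- both have two hits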
Example #27
def _samp_compute_up(t, s, **kwargs):
    if t.n is not None:
        limit = t.n
    else:
        limit = sa.select([safuncs.count() * t.frac],
                          from_obj=s.alias()).as_scalar()
    return s.order_by(safuncs.random()).limit(limit)
Example #28
    def query_count(cls, phone=None, **kwargs):
        q = session.query(count(cls.reply_id))

        if phone is not None:
            q = q.filter(cls.phone_number == phone)

        return q.scalar()
Example #29
def get_stats(connection: Connection,
              survey_id: str,
              email: str) -> dict:
    """
    Get statistics about the specified survey: creation time, number of
    submissions, time of the earliest submission, and time of the latest
    submission.

    :param connection: a SQLAlchemy Connection
    :param survey_id: the UUID of the survey
    :param email: the e-mail address of the user
    :return: a JSON representation of the statistics.
    """
    result = connection.execute(
        select([
            survey_table.c.created_on,
            count(submission_table.c.submission_id),
            sqlmin(submission_table.c.submission_time),
            sqlmax(submission_table.c.submission_time)
        ]).select_from(
            auth_user_table.join(survey_table).outerjoin(submission_table)
        ).where(
            survey_table.c.survey_id == survey_id
        ).where(
            auth_user_table.c.email == email
        ).group_by(
            survey_table.c.survey_id
        )
    ).first()
    return json_response({
        'created_on': maybe_isoformat(result[0]),
        'num_submissions': result[1],
        'earliest_submission_time': maybe_isoformat(result[2]),
        'latest_submission_time': maybe_isoformat(result[3])
    })
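
One statement returns all the statistics by combining three aggregates (count, min, max) over the outer-joined submissions. Counting submission_id rather than * matters: a survey with no submissions still yields one all-NULL joined row, and count(column) skips NULLs, so the result is 0. A hedged sketch, assuming SQLAlchemy 1.4+ and an in-memory SQLite; the tables are simplified stand-ins:

from sqlalchemy import Column, ForeignKey, Integer, MetaData, String, Table, create_engine, select
from sqlalchemy.sql.functions import count, max as sqlmax, min as sqlmin

metadata = MetaData()
surveys = Table("surveys", metadata,
                Column("id", Integer, primary_key=True),
                Column("title", String))
submissions = Table("submissions", metadata,
                    Column("id", Integer, primary_key=True),
                    Column("survey_id", ForeignKey("surveys.id")),
                    Column("submitted_at", String))

engine = create_engine("sqlite://")
metadata.create_all(engine)
with engine.begin() as conn:
    conn.execute(surveys.insert(), [{"id": 1, "title": "empty"}, {"id": 2, "title": "busy"}])
    conn.execute(submissions.insert(), [
        {"survey_id": 2, "submitted_at": "2015-01-01"},
        {"survey_id": 2, "submitted_at": "2015-06-01"},
    ])
    # count/min/max over the outer join; the join condition comes from the FK
    stmt = (select(surveys.c.title,
                   count(submissions.c.id),
                   sqlmin(submissions.c.submitted_at),
                   sqlmax(submissions.c.submitted_at))
            .select_from(surveys.outerjoin(submissions))
            .group_by(surveys.c.id))
    print(conn.execute(stmt).fetchall())
# [('empty', 0, None, None), ('busy', 2, '2015-01-01', '2015-06-01')]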
Example #30
def get_taxa_photo_count(session, metadata):
    """Return the photo count for each (genus, section, species) combination.

    Taxa are returned as 4-tuples ``(genus, section, species, photo_count)``.
    """
    Base = automap_base(metadata=metadata)
    Base.prepare()
    configure_mappers()

    Photo = Base.classes.photos
    Taxon = Base.classes.taxa
    Rank = Base.classes.ranks

    stmt_genus = session.query(Photo.id, Taxon.name.label('genus')).\
        join(Photo.taxa_collection, Taxon.ranks).\
        filter(Rank.name == 'genus').subquery()

    stmt_section = session.query(Photo.id, Taxon.name.label('section')).\
        join(Photo.taxa_collection, Taxon.ranks).\
        filter(Rank.name == 'section').subquery()

    stmt_species = session.query(Photo.id, Taxon.name.label('species')).\
        join(Photo.taxa_collection, Taxon.ranks).\
        filter(Rank.name == 'species').subquery()

    q = session.query('genus', 'section', 'species',
            functions.count(Photo.id).label('photos')).\
        select_from(Photo).\
        join(stmt_genus, stmt_genus.c.id == Photo.id).\
        outerjoin(stmt_section, stmt_section.c.id == Photo.id).\
        join(stmt_species, stmt_species.c.id == Photo.id).\
        group_by('genus', 'section', 'species')

    return q
Example #31
def main():
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(name)s: %(message)s",
    )

    try:
        if len(sys.argv) == 2:
            from_time = datetime.fromisoformat(sys.argv[1])
        elif len(sys.argv) == 1:
            from_time = None
        else:
            raise ValueError
    except ValueError:
        print(
            "Usage:\n  guess_imports.py\n" +
            "  guess_imports.py <since-isodate>",
            file=sys.stderr,
        )
        sys.exit(2)

    # TODO: Only go over projects that have a download more recent than given date

    with database.connect() as db:
        # Count projects
        total_projects, = db.execute(
            sqlalchemy.select([functions.count()
                               ]).select_from(database.projects)).one()

        # List versions
        done_projects = 0
        for project_name, versions in iter_project_versions(db):
            process_versions(project_name, versions)
            done_projects += 1
            if done_projects % 100 == 0:
                logger.info("%d / %d", done_projects, total_projects)
Example #32
 def _delete(self, s):
     start, finish = self._start_finish()
     # we delete the intervals that the statistics depend on and they will cascade
     for repeat in range(2):
         if repeat:
             q = s.query(Interval)
         else:
             q = s.query(count(Interval.id))
         q = q.filter(Interval.owner == self.owner_out)
         if start:
             q = q.filter(Interval.finish > start)
         if finish:
             q = q.filter(Interval.start < finish)
         if repeat:
             for interval in q.all():
                 log.debug('Deleting %s' % interval)
                 s.delete(interval)
         else:
             n = q.scalar()
             if n:
                 log.warning('Deleting %d intervals' % n)
             else:
                 log.warning('No intervals to delete')
     s.commit()
Example #33
    def _process_next(self, wid):
        conn = self.thread.conn
        with conn.begin() as trans:
            # check if way already has a virtual_id
            vid = conn.scalar(
                select([sqlf.count()]).where(self.data.c.child == wid))

            # if it has an id, it is already done. Otherwise do search
            if vid > 0:
                return

            row = conn.execute(self.master.data.select().where(
                self.master.data.c.id == wid)).first()

            properties = [row[name] for name in self.rows]

            merge_list = self._get_all_adjacent_way_ids(wid, properties, conn)

            # only insert ways that have neighbours
            if len(merge_list) > 1:
                conn.execute(self.data.insert(), [{
                    'virtual_id': wid,
                    'child': x
                } for x in merge_list])
Example #34
    def update(self, engine):

        with engine.begin() as conn:
            # XXX do we need this first delete?
            t = self.data
            conn.execute(t.delete().where(
                t.c.child.in_(self.way_table.select_modify_delete())))
            tin = self.data.alias()
            lonely = select([tin.c.virtual_id])\
                      .group_by(tin.c.virtual_id)\
                      .having(sqlf.count(text('1')) < 2)
            conn.execute(t.delete().where(t.c.virtual_id.in_(lonely)))

        # the worker threads
        workers = self.create_worker_queue(engine, self._process_next)

        idcol = self.master.data.c.id
        cur = engine.execute(
            select([idcol
                    ]).where(idcol.in_(self.way_table.select_add_modify())))
        for obj in cur:
            workers.add_task(obj[0])

        workers.finish()
Example #35
    def count_json(uid):
        """Count the number of JSONs a user has access to
        :param uid: the user id
        :return: int
        """
        try:
            # get json through user mapping path
            query_user_json = db.session.query(
                Json.id).join(JsonAccessMap).filter(JsonAccessMap.user == uid)

            # get json through team mapping path
            teams = db.session.query(
                Team.id).join(TeamMemberMap).filter(TeamMemberMap.user == uid)
            query_team_json = db.session.query(
                Json.id).join(TeamJsonMap).filter(TeamJsonMap.team.in_(teams))

            # count distinct json in team and user path
            json_count = db.session.query(count(Json.id.distinct()))\
                .filter(or_(Json.id.in_(query_user_json),
                            Json.id.in_(query_team_json))).scalar()
            return json_count
        except Exception as e:
            logging.error(e)
            raise
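
The count is taken over Json.id.distinct() with two in_() subqueries combined by or_(), so a JSON reachable through both the user path and a team path is counted once. A simplified sketch of that shape, assuming SQLAlchemy 1.4+, an in-memory SQLite database, and a stand-in Doc model in place of the Json/team mapping tables:

from sqlalchemy import Column, Integer, String, create_engine, or_, select
from sqlalchemy.orm import declarative_base, sessionmaker
from sqlalchemy.sql.functions import count

Base = declarative_base()

class Doc(Base):
    __tablename__ = "docs"
    id = Column(Integer, primary_key=True)
    owner = Column(String)
    team = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Doc(owner="alice", team="ops"),
                 Doc(owner="alice", team="dev"),
                 Doc(owner="bob", team="ops")])
session.commit()

owned = select(Doc.id).where(Doc.owner == "alice")      # ids reachable as owner
team_shared = select(Doc.id).where(Doc.team == "ops")   # ids reachable via a team

# count each id at most once even if it matches both subqueries
total = (session.query(count(Doc.id.distinct()))
         .filter(or_(Doc.id.in_(owned), Doc.id.in_(team_shared)))
         .scalar())
print(total)  # 3 -- the first doc matches both paths but is only counted once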
Example #36
 def _delete_from(self, s, start=None, inclusive=True):
     composite_ids = s.query(Composite.id). \
         join(StatisticJournal, Composite.id == StatisticJournal.source_id). \
         join(StatisticName, StatisticJournal.statistic_name_id == StatisticName.id). \
         filter(StatisticName.owner == self.owner_out)
     if start:
         if inclusive:
             composite_ids = composite_ids.filter(
                 StatisticJournal.time >= start)
         else:
             composite_ids = composite_ids.filter(
                 StatisticJournal.time > start)
     log.debug(f'Delete query: {composite_ids}')
     n = s.query(count(Source.id)). \
         filter(Source.id.in_(composite_ids)). \
         scalar()
     if n:
         log.warning(
             f'Deleting {n} Composite sources ({start} onwards{" inclusive" if inclusive else ""})'
         )
         s.query(Source). \
             filter(Source.id.in_(composite_ids)). \
             delete(synchronize_session=False)
         s.commit()
Example #37
def get_flight_by_id(idFlight):
    airport_1 = aliased(Airport)
    airport_2 = aliased(Airport)
    airport_3 = aliased(Airport)

    flight = Schedule.query.join(airport_1, Schedule.departure == airport_1.idAirport) \
        .join(airport_2, Schedule.arrival == airport_2.idAirport) \
        .join(Plane, Schedule.idPlane == Plane.idPlane) \
        .join(Ticket, Schedule.idFlight == Ticket.idFlight) \
        .filter(Schedule.idFlight  == idFlight)\
        .add_columns(Schedule.idFlight,
                     airport_1.name.label("departure_airport"),
                     airport_2.name.label("arrival_airport"),
                     airport_1.locate.label("departure_locate"),
                     airport_2.locate.label("arrival_locate"),
                     Schedule.departureDate.label("departure_date"),
                     Schedule.departureTime.label("departure_time"),
                     Plane.idPlane,
                     Ticket.is_empty,
                     count(Ticket.idTicket).label("empty_seats")) \
        .group_by(Schedule.idFlight) \
        .order_by(desc(Schedule.departureDate)).first()

    return flight
Example #38
 def get_synthesis_contributors(self, id_only=True):
     # author of important extracts
     from .idea_content_link import Extract
     from .auth import AgentProfile
     from .post import Post
     from .generic import Content
     from sqlalchemy.sql.functions import count
     subquery = self.get_descendants_query(self.id)
     query = self.db.query(
         Post.creator_id
         ).join(Extract
         ).join(subquery, Extract.idea_id == subquery.c.id
         ).filter(Extract.important == True
         ).group_by(Post.creator_id
         ).order_by(count(Extract.id).desc())
     if id_only:
         return [AgentProfile.uri_generic(a) for (a,) in query]
     else:
         ids = [x for (x,) in query]
         if not ids:
             return []
         agents = {a.id: a for a in self.db.query(AgentProfile).filter(
             AgentProfile.id.in_(ids))}
         return [agents[id] for id in ids]
Example #39
    def test_bug(self):
        '''
        so why does this work, without 'recursive'?

        EDIT: the 'recursive' is optional in sqlite!  see the very last line at
        https://www.sqlite.org/lang_with.html

        damn.  and all that trouble to make a nice bug report.
        '''
        q_counts = self.session.query(Node.id.label('id'), count(Connect.input_id).label('count')). \
            outerjoin(Connect, Node.id == Connect.output_id). \
            group_by(Node.id).order_by(Node.id).subquery()
        q_missing = self.session.query(Node.id.label('id')). \
            join(q_counts, q_counts.c.id == Node.id). \
            filter(Node.n_input != q_counts.c.count).cte()
        q_missing = q_missing.union_all(
            self.session.query(Node.id).join(
                Connect, Node.id == Connect.output_id).join(
                    q_missing, Connect.input_id == q_missing.c.id))
        print('\nbug\n%s\n' % q_missing.select())
        self.assertEqual([(5, ), (7, ), (8, )],
                         self.session.query(Node.id).filter(
                             Node.id.in_(q_missing.select())).order_by(
                                 Node.id).all())
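
The query builds a WITH ... UNION ALL chain through cte() and union_all(), and, as the docstring observes, SQLite accepts it with or without the RECURSIVE keyword. A minimal self-referencing CTE sketch, assuming SQLAlchemy 1.4+ and an in-memory SQLite; the nodes table is a stand-in that just walks parent_id links:

from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table, create_engine, select

metadata = MetaData()
nodes = Table("nodes", metadata,
              Column("id", Integer, primary_key=True),
              Column("parent_id", ForeignKey("nodes.id")))

engine = create_engine("sqlite://")
metadata.create_all(engine)
with engine.begin() as conn:
    conn.execute(nodes.insert(), [
        {"id": 1, "parent_id": None},
        {"id": 2, "parent_id": 1},
        {"id": 3, "parent_id": 2},
        {"id": 4, "parent_id": None},
    ])
    # anchor member: node 1; recursive member: children of anything already in the CTE
    tree = select(nodes.c.id).where(nodes.c.id == 1).cte("tree", recursive=True)
    tree = tree.union_all(
        select(nodes.c.id).where(nodes.c.parent_id == tree.c.id))
    print(conn.execute(select(tree.c.id)).fetchall())  # [(1,), (2,), (3,)]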
Example #40
async def test_loader_with_aggregation(user):
    count_col = count().label('count')
    user_count = select(
        [User.team_id, count_col]
    ).group_by(
        User.team_id
    ).alias()
    query = Team.outerjoin(user_count).select()
    result = await query.gino.load(
        (Team.id, Team.name, user_count.columns.team_id, count_col)
    ).all()
    assert len(result) == 2
    # team 1 doesn't have users, team 2 has 1 user
    # third and fourth columns are None for team 1
    for team_id, team_name, user_team_id, user_count in result:
        if team_id == user.team_id:
            assert team_name == user.team.name
            assert user_team_id == user.team_id
            assert user_count == 1
        else:
            assert team_id is not None
            assert team_name is not None
            assert user_team_id is None
            assert user_count is None
Example #41
    'pages_count': 240
}
engine.connect().execute(insert_stmt, data)

session = Session(bind=engine)
q = session.query(Book).filter(Book.title == 'Essential SQLAlchemy')
print q
book = q.one()
print(book.id, book.title)

author = Author(name='Rick Copeland')
author.books.append(book)
session.add(book)
session.flush()

####
# select CASE WHEN (BOOK.pages_count > 200) THEN 1 ELSE 0 END is_novel, count(*)
# from BOOK
# group by CASE WHEN (BOOK.pages_count > 200) THEN 1 ELSE 0 END
# order by CASE WHEN (BOOK.pages_count > 200) THEN 1 ELSE 0 END
#
is_novel_column = case([(Book.pages_count > 200, 1)], else_=0)
novel_query = session.query(is_novel_column.label('is_alias'), count()).\
    group_by(is_novel_column).\
    order_by(is_novel_column)

print novel_query
print novel_query.all()

session.close()
Example #42
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*: for filters that cannot be reversed (ex: only_synthesis, only_orphan)
    is_*: for filters that can be reversed (ex: is_unread=true returns only unread
     messages, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_reply_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    classifier: filter on message_classifier, or absence thereof (classifier=null). Can be negated with "!"
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            localizer.translate(_("No discussion found with id=%s")) %
            discussion_id)

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score',
                     'popularity')
    if order == 'score':
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(
            post_author_id
        ), "Unable to find agent profile with id " + post_author_id

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(
            post_replies_to
        ), "Unable to find agent profile with id " + post_replies_to

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    message_classifiers = request.GET.getall('classifier')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        posts = discussion.db.query(PostClass,
                                    Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(PostClass.discussion_id == discussion_id, )
    ##no_of_posts_to_discussion = posts.count()

    post_data = []

    # True means deleted only, False (default) means non-deleted only. None means both.

    # v0
    # deleted = request.GET.get('deleted', None)
    # end v0

    # v1: we would like something like that
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if view_def == 'id_only':
    #         deleted = None
    #     else:
    #         deleted = False
    # end v1

    # v2
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    #
    # if deleted == 'false':
    #     deleted = False
    #     posts = posts.filter(PostClass.tombstone_condition())
    # elif deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # elif deleted == 'any':
    #     deleted = None
    #     # result will contain deleted and non-deleted posts
    #     pass
    # end v2

    # v3
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None

    # if deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # end v3

    # v4
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    # if deleted is not in (False, True, None):
    #    deleted = False
    # end v4

    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(
                localizer.translate(
                    _("Getting orphan posts of a specific idea isn't supported."
                      )))
        orphans = Idea._get_orphan_posts_statement(
            discussion_id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)

    if root_idea_id:
        related = Idea.get_related_posts_query_c(discussion_id,
                                                 root_idea_id,
                                                 True,
                                                 include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
            else:
                posts = posts.filter(PostClass.tombstone_date == None)

    if root_post_id:
        root_post = Post.get(root_post_id)

        posts = posts.filter((Post.ancestry.like(root_post.ancestry +
                                                 cast(root_post.id, String) +
                                                 ',%'))
                             | (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter((Post.ancestry.like(root_post.ancestry +
                                                 cast(root_post.id, String) +
                                                 ',%'))
                             | (PostClass.id == root_post.id)
                             | (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
        # Maybe we should do something if the date is invalid. - benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)

    if message_classifiers:
        if any([len(classifier) == 0 for classifier in message_classifiers]):
            return {'total': 0, 'posts': []}
        polarities = [
            classifier[0] != "!" for classifier in message_classifiers
        ]
        polarity = all(polarities)
        if not polarity:
            message_classifiers = [c.strip("!") for c in message_classifiers]
        if polarity != any(polarities):
            raise HTTPBadRequest(
                _("Do not combine negative and positive classifiers"))
        # Treat null as no classifier
        includes_null = 'null' in message_classifiers
        # Always defined as a list so both branches below can use it safely
        message_classifiers_nonull = [c for c in message_classifiers
                                      if c != "null"]
        if polarity:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier == (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.in_(
                    message_classifiers_nonull)
                if includes_null:
                    term = term | (PostClass.message_classifier == None)
        else:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier != (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.notin_(
                    message_classifiers_nonull)
            if not includes_null:
                term = term | (PostClass.message_classifier == None)
        posts = posts.filter(term)
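        # Illustration of the resulting filter (hypothetical values): classifiers
        # ["a", "b", "null"] (positive polarity) produce
        #   message_classifier IN ('a', 'b') OR message_classifier IS NULL
        # while ["!a", "!b"] (negative polarity) produce
        #   message_classifier NOT IN ('a', 'b') OR message_classifier IS NULL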

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {
            v.post_id
            for v in discussion.db.query(ViewPost).filter(
                ViewPost.tombstone_condition(), ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion_id))
        }
        my_sentiments = {
            l.post_id: l
            for l in discussion.db.query(SentimentOfPost).filter(
                SentimentOfPost.tombstone_condition(), SentimentOfPost.actor_id
                == user_id,
                *SentimentOfPost.get_discussion_conditions(discussion_id))
        }
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost,
                and_(ViewPost.actor_id == user_id,
                     ViewPost.post_id == PostClass.id,
                     ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        # If there is no user_id, all posts are always unread
        my_sentiments = {}
        if is_unread == "false":
            raise HTTPBadRequest(
                localizer.translate(
                    _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(
            Post.body_text_index.contains(text_search.encode('utf-8'),
                                          offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())
        # Note: we could count the likes the same way and kill the subquery.
        # But it interferes with the popularity order,
        # and the benefit is not that high.
        sentiment_counts = discussion.db.query(
            PostClass.id, SentimentOfPost.type,
            count(SentimentOfPost.id)).join(SentimentOfPost).filter(
                PostClass.id.in_(posts.with_entities(PostClass.id).subquery()),
                SentimentOfPost.tombstone_condition()).group_by(
                    PostClass.id, SentimentOfPost.type)
        sentiment_counts_by_post_id = defaultdict(dict)
        for (post_id, sentiment_type, sentiment_count) in sentiment_counts:
            sentiment_counts_by_post_id[post_id][sentiment_type[
                SentimentOfPost.TYPE_PREFIX_LEN:]] = sentiment_count
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
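        # Eager-loading choice below: joinedload pulls the related rows in the
        # same query, while subqueryload issues a second query; with several
        # discussion locales the per-post translation rows presumably multiply,
        # so the subquery strategy is preferred there.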
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(Content.disagree_count - Content.like_count,
                               Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])

        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(PostClass).filter(
                PostClass.id.in_(ancestor_ids))
            if view_def == 'id_only':
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())

    for query_result in posts:
        score, viewpost = None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if deleted is True:
            add_ancestors(post)

        if user_id != Everyone:
            viewpost = post.id in read_posts
            if view_def != "id_only":
                translate_content(post,
                                  translation_table=translations,
                                  service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(view_def, user_id,
                                              permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None and root_post.id == post.id:
            # Mark post read, since we requested it explicitly
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        my_sentiment = my_sentiments.get(post.id, None)
        if my_sentiment is not None:
            my_sentiment = my_sentiment.generic_json('default', user_id,
                                                     permissions)
        serializable_post['my_sentiment'] = my_sentiment
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))
            serializable_post[
                'sentiment_counts'] = sentiment_counts_by_post_id[post.id]

        post_data.append(serializable_post)

    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling.
    # posts = posts.limit(page_size).offset(data['startIndex'] - 1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results.
    # no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #     Post
    # ).filter(
    #     Post.discussion_id == discussion_id,
    #     ViewPost.actor_id == user_id,
    # ).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1-based index in the API
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data
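# Worked example of the pagination fields computed above (hypothetical numbers):
# with page_size = 20 and 45 matching posts, maxPage = ceil(45 / 20) = 3 and
#   page 1 -> startIndex 1,  endIndex 20
#   page 2 -> startIndex 21, endIndex 40
#   page 3 -> startIndex 41, endIndex 45 (the total, since page == maxPage)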
Example #43
    def list(self, where=None):
        """Return a list of instances of this model.

        Given a model class, build up the ORM query based on query params
        and return the query result.
        """
        self.session.flush()
        model_cls = self.resource_type
        query = self.session.query(model_cls, count().over())

        limit = self._query_arg('limit', int)
        offset = self._query_arg('offset', int)
        deleted = self._query_arg('show_deleted', bool, False)
        search_term = self._query_arg('search')
        regex = self._query_arg('regex', bool, False)
        search_fields = self._query_arg('search_fields',
                                        list,
                                        default=['title'])
        search_lang = self._query_arg('lang')

        default_sort = ['{}:ASC'.format(self.default_sort_column_name)]
        order_by_text = (element.split(':') for element in self._query_arg(
            'order_by', list, default=default_sort))

        type_constraint = self._query_arg('type')
        user_id = self._query_arg('user_id')

        num_total = self.session.query(func.count(self.resource_type.id))
        if user_id is not None:
            if model_cls is Submission:
                num_total = num_total.join(Survey.submissions)
            num_total = (num_total.outerjoin(_administrator_table).filter(
                administrator_filter(user_id)))
        num_total = num_total.scalar()

        if search_term is not None:
            for search_field in search_fields:
                query = column_search(
                    query,
                    model_cls=model_cls,
                    column_name=search_field,
                    search_term=search_term,
                    language=search_lang,
                    regex=regex,
                )

        if user_id is not None:
            if model_cls is Submission:
                query = query.join(Survey.submissions)
            query = (query.outerjoin(_administrator_table).filter(
                administrator_filter(user_id)))

        if not deleted:
            query = query.filter(~model_cls.deleted)

        if type_constraint is not None:
            query = query.filter(model_cls.type_constraint == type_constraint)

        if where is not None:
            query = query.filter(where)

        for attribute_name, direction in order_by_text:
            try:
                order = getattr(model_cls, attribute_name)
            except AttributeError:
                order = text('{} {} NULLS LAST'.format(attribute_name,
                                                       direction))
            else:
                directions = {'asc': order.asc, 'desc': order.desc}
                order = directions[direction.lower()]().nullslast()
            query = query.order_by(order)

        if limit is not None:
            query = query.limit(limit)

        if offset is not None:
            query = query.offset(offset)

        result = query.all()
        if result:
            num_filtered = result[0][1]
            models = [res[0] for res in result]
            result = self._specific_fields(models, is_detail=False)
            return num_filtered, num_total, result
        return 0, num_total, []
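    # The count().over() window function in the query above attaches the total
    # number of matching rows to every returned row, so a single LIMIT/OFFSET
    # query yields both the page of models and the filtered count. A minimal
    # standalone sketch of the same pattern (Model and session are hypothetical):
    #
    #   from sqlalchemy import func
    #   rows = session.query(Model, func.count().over()).limit(10).all()
    #   models = [row[0] for row in rows]
    #   num_filtered = rows[0][1] if rows else 0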
Example #44
def _map_info_data(request):
    map_id = request.matchdict['id']

    try:
        leaderboard_lifetime = int(
            request.registry.settings['xonstat.leaderboard_lifetime'])
    except (KeyError, ValueError):
        leaderboard_lifetime = 30

    leaderboard_count = 10
    recent_games_count = 20

    try:
        gmap = DBSession.query(Map).filter_by(map_id=map_id).one()

        # recent games on this map
        recent_games = DBSession.query(Game, Server, Map, PlayerGameStat).\
            filter(Game.server_id==Server.server_id).\
            filter(Game.map_id==Map.map_id).\
            filter(Game.map_id==map_id).\
            filter(PlayerGameStat.game_id==Game.game_id).\
            filter(PlayerGameStat.rank==1).\
            order_by(expr.desc(Game.start_dt)).all()[0:recent_games_count]

        # top players by score
        top_scorers = DBSession.query(Player.player_id, Player.nick,
                func.sum(PlayerGameStat.score)).\
                filter(Player.player_id == PlayerGameStat.player_id).\
                filter(Game.game_id == PlayerGameStat.game_id).\
                filter(Game.map_id == map_id).\
                filter(Player.player_id > 2).\
                filter(PlayerGameStat.create_dt >
                        (datetime.utcnow() - timedelta(days=leaderboard_lifetime))).\
                order_by(expr.desc(func.sum(PlayerGameStat.score))).\
                group_by(Player.nick).\
                group_by(Player.player_id).all()[0:leaderboard_count]

        top_scorers = [(player_id, html_colors(nick), score) \
                for (player_id, nick, score) in top_scorers]

        # top players by playing time
        top_players = DBSession.query(Player.player_id, Player.nick,
                func.sum(PlayerGameStat.alivetime)).\
                filter(Player.player_id == PlayerGameStat.player_id).\
                filter(Game.game_id == PlayerGameStat.game_id).\
                filter(Game.map_id == map_id).\
                filter(Player.player_id > 2).\
                filter(PlayerGameStat.create_dt >
                        (datetime.utcnow() - timedelta(days=leaderboard_lifetime))).\
                order_by(expr.desc(func.sum(PlayerGameStat.alivetime))).\
                group_by(Player.nick).\
                group_by(Player.player_id).all()[0:leaderboard_count]

        top_players = [(player_id, html_colors(nick), score) \
                for (player_id, nick, score) in top_players]

        # top servers using/playing this map
        top_servers = DBSession.query(Server.server_id, Server.name,
                func.count(Game.game_id)).\
                filter(Game.server_id == Server.server_id).\
                filter(Game.map_id == map_id).\
                filter(Game.create_dt >
                        (datetime.utcnow() - timedelta(days=leaderboard_lifetime))).\
                order_by(expr.desc(func.count(Game.game_id))).\
                group_by(Server.name).\
                group_by(Server.server_id).all()[0:leaderboard_count]

    except Exception:
        # Ensure every value returned below is defined even when the queries fail
        gmap = None
        recent_games = []
        top_scorers = []
        top_players = []
        top_servers = []
    return {
        'gmap': gmap,
        'recent_games': recent_games,
        'top_scorers': top_scorers,
        'top_players': top_players,
        'top_servers': top_servers,
    }
Example #45
def progress_reporter(test_id, token):
    """
    Handle the progress of a certain test after validating the token. If necessary, update the status on GitHub.

    :param test_id: The id of the test to update.
    :type test_id: int
    :param token: The token to check the validity of the request.
    :type token: str
    :return: Nothing.
    :rtype: None
    """
    from run import config, log
    # Verify token
    test = Test.query.filter(Test.id == test_id).first()
    if test is not None and test.token == token:
        repo_folder = config.get('SAMPLE_REPOSITORY', '')
        if 'type' in request.form:
            if request.form['type'] == 'progress':
                # Progress, log
                status = TestStatus.from_string(request.form['status'])
                # Check whether test is not running previous status again
                istatus = TestStatus.progress_step(status)
                message = request.form['message']

                if len(test.progress) != 0:
                    laststatus = TestStatus.progress_step(
                        test.progress[-1].status)

                    if laststatus in [
                            TestStatus.completed, TestStatus.canceled
                    ]:
                        return "FAIL"

                    if laststatus > istatus:
                        status = TestStatus.canceled
                        message = "Duplicate Entries"

                progress = TestProgress(test.id, status, message)
                g.db.add(progress)
                g.db.commit()

                gh = GitHub(access_token=g.github['bot_token'])
                repository = gh.repos(g.github['repository_owner'])(
                    g.github['repository'])
                # Store the test commit for testing in case of commit
                if status == TestStatus.completed:
                    commit_name = 'fetch_commit_' + test.platform.value
                    commit = GeneralData.query.filter(
                        GeneralData.key == commit_name).first()
                    fetch_commit = Test.query.filter(
                        and_(Test.commit == commit.value,
                             Test.platform == test.platform)).first()

                    if test.test_type == TestType.commit and test.id > fetch_commit.id:
                        commit.value = test.commit
                        g.db.commit()

                # If status is complete, remove the Kvm entry
                if status in [TestStatus.completed, TestStatus.canceled]:
                    log.debug("Test {id} has been {status}".format(
                        id=test_id, status=status))
                    var_average = 'average_time_' + test.platform.value
                    current_average = GeneralData.query.filter(
                        GeneralData.key == var_average).first()
                    average_time = 0
                    total_time = 0

                    if current_average is None:
                        platform_tests = g.db.query(Test.id).filter(
                            Test.platform == test.platform).subquery()
                        finished_tests = g.db.query(
                            TestProgress.test_id).filter(
                                and_(
                                    TestProgress.status.in_([
                                        TestStatus.canceled,
                                        TestStatus.completed
                                    ]), TestProgress.test_id.in_(
                                        platform_tests))).subquery()
                        in_progress_statuses = [
                            TestStatus.preparation, TestStatus.completed,
                            TestStatus.canceled
                        ]
                        finished_tests_progress = g.db.query(
                            TestProgress).filter(
                                and_(
                                    TestProgress.test_id.in_(finished_tests),
                                    TestProgress.status.in_(
                                        in_progress_statuses))).subquery()
                        times = g.db.query(
                            finished_tests_progress.c.test_id,
                            label(
                                'time',
                                func.group_concat(
                                    finished_tests_progress.c.timestamp))
                        ).group_by(finished_tests_progress.c.test_id).all()

                        for p in times:
                            parts = p.time.split(',')
                            start = datetime.datetime.strptime(
                                parts[0], '%Y-%m-%d %H:%M:%S')
                            end = datetime.datetime.strptime(
                                parts[-1], '%Y-%m-%d %H:%M:%S')
                            total_time += (end - start).total_seconds()

                        if len(times) != 0:
                            average_time = total_time // len(times)

                        new_avg = GeneralData(var_average, average_time)
                        g.db.add(new_avg)
                        g.db.commit()

                    else:
                        all_results = TestResult.query.count()
                        regression_test_count = RegressionTest.query.count()
                        number_test = all_results / regression_test_count
                        updated_average = float(
                            current_average.value) * (number_test - 1)
                        pr = test.progress_data()
                        end_time = pr['end']
                        start_time = pr['start']

                        if end_time.tzinfo is not None:
                            end_time = end_time.replace(tzinfo=None)

                        if start_time.tzinfo is not None:
                            start_time = start_time.replace(tzinfo=None)

                        last_running_test = end_time - start_time
                        updated_average += last_running_test.total_seconds()
                        current_average.value = updated_average // number_test
                        g.db.commit()
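                    # The update above is an incremental mean (illustrative
                    # notation): with n = total results / regression tests,
                    #   new_avg = (old_avg * (n - 1) + last_run_seconds) // n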

                    kvm = Kvm.query.filter(Kvm.test_id == test_id).first()

                    if kvm is not None:
                        log.debug("Removing KVM entry")
                        g.db.delete(kvm)
                        g.db.commit()

                # Post status update
                state = Status.PENDING
                target_url = url_for('test.by_id',
                                     test_id=test.id,
                                     _external=True)
                context = "CI - {name}".format(name=test.platform.value)

                if status == TestStatus.canceled:
                    state = Status.ERROR
                    message = 'Tests aborted due to an error; please check'

                elif status == TestStatus.completed:
                    # Determine if success or failure
                    # It fails if any of these happen:
                    # - A crash (unexpected exit code)
                    # - A not None value on the "got" of a TestResultFile (
                    #       meaning the hashes do not match)
                    crashes = g.db.query(count(TestResult.exit_code)).filter(
                        and_(TestResult.test_id == test.id,
                             TestResult.exit_code !=
                             TestResult.expected_rc)).scalar()
                    results_zero_rc = g.db.query(RegressionTest.id).filter(
                        RegressionTest.expected_rc == 0).subquery()
                    results = g.db.query(count(TestResultFile.got)).filter(
                        and_(
                            TestResultFile.test_id == test.id,
                            TestResultFile.regression_test_id.in_(
                                results_zero_rc),
                            TestResultFile.got.isnot(None))).scalar()
                    log.debug(
                        'Test {id} completed: {crashes} crashes, {results} results'
                        .format(id=test.id, crashes=crashes, results=results))
                    if crashes > 0 or results > 0:
                        state = Status.FAILURE
                        message = 'Not all tests completed successfully, please check'

                    else:
                        state = Status.SUCCESS
                        message = 'Tests completed'

                    update_build_badge(state, test)

                else:
                    message = progress.message

                gh_commit = repository.statuses(test.commit)
                try:
                    gh_commit.post(state=state,
                                   description=message,
                                   context=context,
                                   target_url=target_url)
                except ApiError as a:
                    log.error(
                        'Got an exception while posting to GitHub! Message: {message}'
                        .format(message=a.message))

                if status in [TestStatus.completed, TestStatus.canceled]:
                    # Start next test if necessary, on the same platform
                    process = Process(target=start_platform,
                                      args=(g.db, repository, 60))
                    process.start()

            elif request.form['type'] == 'equality':
                log.debug('Equality for {t}/{rt}/{rto}'.format(
                    t=test_id,
                    rt=request.form['test_id'],
                    rto=request.form['test_file_id']))
                rto = RegressionTestOutput.query.filter(
                    RegressionTestOutput.id ==
                    request.form['test_file_id']).first()

                if rto is None:
                    # Equality posted on a file that's ignored presumably
                    log.info('No rto for {test_id}: {test}'.format(
                        test_id=test_id, test=request.form['test_id']))
                else:
                    result_file = TestResultFile(test.id,
                                                 request.form['test_id'],
                                                 rto.id, rto.correct)
                    g.db.add(result_file)
                    g.db.commit()

            elif request.form['type'] == 'logupload':
                log.debug("Received log file for test {id}".format(id=test_id))
                # File upload, process
                if 'file' in request.files:
                    uploaded_file = request.files['file']
                    filename = secure_filename(uploaded_file.filename)
                    if filename == '':
                        return 'EMPTY'

                    temp_path = os.path.join(repo_folder, 'TempFiles',
                                             filename)
                    # Save to temporary location
                    uploaded_file.save(temp_path)
                    final_path = os.path.join(
                        repo_folder, 'LogFiles',
                        '{id}{ext}'.format(id=test.id, ext='.txt'))

                    os.rename(temp_path, final_path)
                    log.debug("Stored log file")

            elif request.form['type'] == 'upload':
                log.debug('Upload for {t}/{rt}/{rto}'.format(
                    t=test_id,
                    rt=request.form['test_id'],
                    rto=request.form['test_file_id']))
                # File upload, process
                if 'file' in request.files:
                    uploaded_file = request.files['file']
                    filename = secure_filename(uploaded_file.filename)
                    if filename == '':
                        return 'EMPTY'
                    temp_path = os.path.join(repo_folder, 'TempFiles',
                                             filename)
                    # Save to temporary location
                    uploaded_file.save(temp_path)
                    # Get hash and check if it's already been submitted
                    hash_sha256 = hashlib.sha256()
                    with open(temp_path, "rb") as f:
                        for chunk in iter(lambda: f.read(4096), b""):
                            hash_sha256.update(chunk)
                    file_hash = hash_sha256.hexdigest()
                    filename, file_extension = os.path.splitext(filename)
                    final_path = os.path.join(
                        repo_folder, 'TestResults',
                        '{hash}{ext}'.format(hash=file_hash,
                                             ext=file_extension))
                    os.rename(temp_path, final_path)
                    rto = RegressionTestOutput.query.filter(
                        RegressionTestOutput.id ==
                        request.form['test_file_id']).first()
                    result_file = TestResultFile(test.id,
                                                 request.form['test_id'],
                                                 rto.id, rto.correct,
                                                 file_hash)
                    g.db.add(result_file)
                    g.db.commit()

            elif request.form['type'] == 'finish':
                log.debug('Finish for {t}/{rt}'.format(
                    t=test_id, rt=request.form['test_id']))
                regression_test = RegressionTest.query.filter(
                    RegressionTest.id == request.form['test_id']).first()
                result = TestResult(test.id, regression_test.id,
                                    request.form['runTime'],
                                    request.form['exitCode'],
                                    regression_test.expected_rc)
                g.db.add(result)
                try:
                    g.db.commit()
                except IntegrityError as e:
                    log.error('Could not save the results: {msg}'.format(
                        msg=str(e)))

            return "OK"

    return "FAIL"
Example #46
    def test_sources(self):

        with TemporaryDirectory() as f:

            args, data = bootstrap_dir(f, m(V), '5', configurator=acooke)

            with data.db.session_context() as s:

                # add a diary entry

                journal = add(s, DiaryTopicJournal(date='2018-09-29'))
                cache = journal.cache(s)
                diary = s.query(DiaryTopic).filter(DiaryTopic.title == 'Status').one()
                fields = diary.fields
                self.assertEqual(len(fields), 6, list(enumerate(map(str, fields))))
                self.assertEqual(fields[0].statistic_name.name, 'notes')
                self.assertEqual(fields[1].statistic_name.name, 'weight', str(fields[1]))
                statistics = [cache[field] for field in fields]
                for statistic in statistics:
                    self.assertTrue(statistic.value is None, statistics)
                statistics[0].value = 'hello world'
                statistics[1].value = 64.5

            with data.db.session_context() as s:

                # check the diary entry was persisted

                journal = DiaryTopicJournal.get_or_add(s, '2018-09-29')
                cache = journal.cache(s)
                diary = s.query(DiaryTopic).filter(DiaryTopic.title == 'Status').one()
                fields = diary.fields
                self.assertEqual(len(fields), 6, list(enumerate(map(str, fields))))
                self.assertEqual(fields[0].statistic_name.name, 'notes')
                self.assertEqual(fields[1].statistic_name.name, 'weight', str(fields[1]))
                statistics = [cache[field] for field in fields]
                self.assertEqual(statistics[1].value, 64.5)
                self.assertEqual(statistics[1].type, StatisticJournalType.FLOAT)

            # generate summary stats

            SummaryCalculator(data, schedule='m').run()
            SummaryCalculator(data, schedule='y').run()

            with data.db.session_context() as s:

                # check the summary stats

                diary = s.query(DiaryTopic).filter(DiaryTopic.title == 'Status').one()
                weights = s.query(StatisticJournal).join(StatisticName). \
                               filter(StatisticName.owner == diary, StatisticName.name == 'weight'). \
                               order_by(StatisticJournal.time).all()
                self.assertEqual(len(weights), 2)
                self.assertEqual(weights[1].value, 64.5)
                self.assertEqual(len(weights[1].measures), 2, weights[1].measures)
                self.assertEqual(weights[1].measures[0].rank, 1)
                self.assertEqual(weights[1].measures[0].percentile, 100, weights[1].measures[0].percentile)
                n = s.query(count(StatisticJournalFloat.id)).scalar()
                self.assertEqual(n, 4, n)
                n = s.query(count(StatisticJournalInteger.id)).scalar()
                self.assertEqual(n, 6, n)
                m_avg = s.query(StatisticJournalFloat).join(StatisticName). \
                    filter(StatisticName.name == 'avg-month-weight').one()
                self.assertEqual(m_avg.value, 64.5)
                y_avg = s.query(StatisticJournalFloat).join(StatisticName). \
                    filter(StatisticName.name == 'avg-year-weight').one()
                self.assertEqual(y_avg.value, 64.5)
                month = s.query(Interval).filter(Interval.schedule == 'm').first()
                self.assertEqual(month.start, to_date('2018-09-01'), month.start)
                self.assertEqual(month.finish, to_date('2018-10-01'), month.finish)

            with data.db.session_context() as s:

                # delete the diary entry

                journal = DiaryTopicJournal.get_or_add(s, '2018-09-29')
                s.delete(journal)

            with data.db.session_context() as s:

                # check the delete cascade

                self.assertEqual(s.query(count(DiaryTopicJournal.id)).scalar(), 1)
                # this should be zero because the Intervals were automatically deleted
                # (well, now +1 because there's an original default weight)
                for source in s.query(Source).all():
                    print(source)
                for journal in s.query(StatisticJournal).all():
                    print(journal)
                self.assertEqual(s.query(count(Source.id)).scalar(), 37, list(map(str, s.query(Source).all())))  # constants
                self.assertEqual(s.query(count(StatisticJournalText.id)).scalar(), 13, s.query(count(StatisticJournalText.id)).scalar())
                self.assertEqual(s.query(count(StatisticJournal.id)).scalar(), 22, s.query(count(StatisticJournal.id)).scalar())
Example #47
import logging

import sqlalchemy as sa
from sqlalchemy import func

from abilian.core.entities import Entity
from abilian.core.extensions import db
from abilian.core.models.tag import Tag, entity_tag_tbl
from abilian.i18n import _, _l, _n
from abilian.services import get_service
from abilian.services.indexing.service import index_update
from abilian.web import url_for
from abilian.web.admin import AdminPanel
from abilian.web.views import ObjectEdit
from abilian.web.views.base import View

from .forms import TagForm

logger = logging.getLogger(__name__)

_OBJ_COUNT = func.count(entity_tag_tbl.c.entity_id).label("obj_count")


def get_entities_for_reindex(tags):
    """Collect entities for theses tags."""
    if isinstance(tags, Tag):
        tags = (tags, )

    session = db.session()
    indexing = get_service("indexing")
    tbl = Entity.__table__
    tag_ids = [t.id for t in tags]
    query = (sa.sql.select([tbl.c.entity_type, tbl.c.id]).select_from(
        tbl.join(entity_tag_tbl,
                 entity_tag_tbl.c.entity_id == tbl.c.id)).where(
                     entity_tag_tbl.c.tag_id.in_(tag_ids)))
Example #48
def union_select(select_components, distinct=False, select_type=TRIPLE_SELECT):
    """
    Helper function for building union all select statement.

    Args:
        select_components (iterable of tuples): Indicates the table and table type
            (table_name, where_clause_string, table_type)
        distinct (bool): Whether to eliminate duplicate results
        select_type (int): From `rdflib_sqlalchemy.constants`. Either `COUNT_SELECT`,
            `CONTEXT_SELECT`, `TRIPLE_SELECT`

    """
    selects = []
    for table, whereClause, tableType in select_components:

        if select_type == COUNT_SELECT:
            c = table.c
            if tableType == ASSERTED_TYPE_PARTITION:
                cols = [c.member, c.klass]
            elif tableType in (ASSERTED_LITERAL_PARTITION,
                               ASSERTED_NON_TYPE_PARTITION, QUOTED_PARTITION):
                cols = [c.subject, c.predicate, c.object]
            else:
                raise ValueError(
                    'Unrecognized table type {}'.format(tableType))
            select_clause = expression.select([
                functions.count().label('aCount')
            ]).select_from(
                expression.select(cols,
                                  whereClause).distinct().select_from(table))
        elif select_type == CONTEXT_SELECT:
            select_clause = expression.select([table.c.context], whereClause)
        elif tableType in FULL_TRIPLE_PARTITIONS:
            select_clause = table.select(whereClause)
        elif tableType == ASSERTED_TYPE_PARTITION:
            select_clause = expression.select([
                table.c.id.label("id"),
                table.c.member.label("subject"),
                expression.literal(text_type(RDF.type)).label("predicate"),
                table.c.klass.label("object"),
                table.c.context.label("context"),
                table.c.termComb.label("termcomb"),
                expression.literal_column("NULL").label("objlanguage"),
                expression.literal_column("NULL").label("objdatatype")
            ], whereClause)
        elif tableType == ASSERTED_NON_TYPE_PARTITION:
            select_clause = expression.select([c for c in table.columns] + [
                expression.literal_column("NULL").label("objlanguage"),
                expression.literal_column("NULL").label("objdatatype")
            ],
                                              whereClause,
                                              from_obj=[table])

        selects.append(select_clause)

    order_statement = []
    if select_type == TRIPLE_SELECT:
        order_statement = [
            expression.literal_column("subject"),
            expression.literal_column("predicate"),
            expression.literal_column("object"),
        ]
    if distinct and select_type != COUNT_SELECT:
        return expression.union(*selects, **{"order_by": order_statement})
    else:
        return expression.union_all(*selects, **{"order_by": order_statement})
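# A minimal sketch of calling union_select (the table objects and where clauses
# are hypothetical; the partition and select-type constants are assumed to come
# from rdflib_sqlalchemy.constants, as the docstring notes for select_type):
#
#   components = [
#       (asserted_table, asserted_table.c.predicate == predicate_val,
#        ASSERTED_NON_TYPE_PARTITION),
#       (type_table, type_table.c.klass == class_val, ASSERTED_TYPE_PARTITION),
#   ]
#   query = union_select(components, distinct=True, select_type=TRIPLE_SELECT)
#   rows = connection.execute(query).fetchall()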
Example #49
    def get_data(self, player_id):
        """Return player data as dict.

        This function is similar to the function in player.py but more optimized
        for this purpose.
        """
        # total games
        # wins/losses
        # kills/deaths
        # duel/dm/tdm/ctf elo + rank

        player = DBSession.query(Player).filter(
            Player.player_id == player_id).one()

        games_played = DBSession.query(
                Game.game_type_cd, func.count(), func.sum(PlayerGameStat.alivetime)).\
                filter(Game.game_id == PlayerGameStat.game_id).\
                filter(PlayerGameStat.player_id == player_id).\
                group_by(Game.game_type_cd).\
                order_by(func.count().desc()).\
                all()

        total_stats = {}
        total_stats['games'] = 0
        total_stats['games_breakdown'] = {}  # this is a dictionary inside a dictionary... dictception?
        total_stats['games_alivetime'] = {}
        total_stats['gametypes'] = []
        for (game_type_cd, games, alivetime) in games_played:
            total_stats['games'] += games
            total_stats['gametypes'].append(game_type_cd)
            total_stats['games_breakdown'][game_type_cd] = games
            total_stats['games_alivetime'][game_type_cd] = alivetime

        (total_stats['kills'], total_stats['deaths'], total_stats['alivetime'],) = DBSession.query(
                func.sum(PlayerGameStat.kills),
                func.sum(PlayerGameStat.deaths),
                func.sum(PlayerGameStat.alivetime)).\
                filter(PlayerGameStat.player_id == player_id).\
                one()

        (total_stats['wins'], total_stats['losses']) = DBSession.\
                query("wins", "losses").\
                from_statement(
                    "SELECT SUM(win) wins, SUM(loss) losses "
                    "FROM   (SELECT  g.game_id, "
                    "                CASE "
                    "                  WHEN g.winner = pgs.team THEN 1 "
                    "                  WHEN pgs.rank = 1 THEN 1 "
                    "                  ELSE 0 "
                    "                END win, "
                    "                CASE "
                    "                  WHEN g.winner = pgs.team THEN 0 "
                    "                  WHEN pgs.rank = 1 THEN 0 "
                    "                  ELSE 1 "
                    "                END loss "
                    "        FROM    games g, "
                    "                player_game_stats pgs "
                    "        WHERE   g.game_id = pgs.game_id "
                    "                AND pgs.player_id = :player_id) win_loss").\
                params(player_id=player_id).one()
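        # Reading of the raw SQL above: a game counts as a win when the player's
        # team is the game winner or the player ranked first, otherwise as a
        # loss; the outer SELECT sums those 0/1 flags into the two totals.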

        ranks = DBSession.query("game_type_cd", "rank", "max_rank").\
                from_statement(
                    "SELECT  pr.game_type_cd, pr.rank, overall.max_rank "
                    "FROM    player_ranks pr, "
                    "        (SELECT  game_type_cd, max(rank) max_rank "
                    "        FROM     player_ranks "
                    "        GROUP BY game_type_cd) overall "
                    "WHERE   pr.game_type_cd = overall.game_type_cd  "
                    "        AND player_id = :player_id "
                    "ORDER BY rank").\
                params(player_id=player_id).all()

        ranks_dict = {}
        for gtc, rank, max_rank in ranks:
            ranks_dict[gtc] = (rank, max_rank)

        elos = DBSession.query(PlayerElo).\
                filter_by(player_id=player_id).\
                order_by(PlayerElo.elo.desc()).\
                all()

        elos_dict = {}
        for elo in elos:
            if elo.games >= 32:
                elos_dict[elo.game_type_cd] = elo.elo

        self.data = {
            'player': player,
            'total_stats': total_stats,
            'ranks': ranks_dict,
            'elos': elos_dict,
        }
Example #50
import logging

import sqlalchemy as sa
from sqlalchemy.sql import functions

from flask import current_app, render_template, request, flash, redirect

from abilian.i18n import _, _l, _n
from abilian.core.models.tag import Tag, entity_tag_tbl
from abilian.core.entities import Entity
from abilian.web import url_for
from abilian.web.admin import AdminPanel
from abilian.web.views.base import View
from abilian.web.views import ObjectEdit
from abilian.services.indexing.service import index_update

from .forms import TagForm

logger = logging.getLogger(__name__)

_OBJ_COUNT = functions.count(entity_tag_tbl.c.entity_id).label('obj_count')


def get_entities_for_reindex(tags):
    """
  Collect entities for theses tags.
  """
    if isinstance(tags, Tag):
        tags = (tags, )

    session = current_app.db.session()
    indexing = current_app.services['indexing']
    tbl = Entity.__table__
    tag_ids = [t.id for t in tags]
    q = sa.sql.select([tbl.c.entity_type, tbl.c.id])\
              .select_from(
Example #51
    def test_sources(self):

        with NamedTemporaryFile() as f:

            args, db = bootstrap_file(f, m(V), '5', configurator=acooke)

            with db.session_context() as s:

                # add a diary entry

                diary = s.query(Topic).filter(Topic.name == 'Diary').one()
                d = add(s, TopicJournal(topic=diary, date='2018-09-29'))
                d.populate(log, s)
                self.assertEqual(len(d.topic.fields), 9,
                                 list(enumerate(map(str, d.topic.fields))))
                self.assertEqual(d.topic.fields[0].statistic_name.name,
                                 'Notes')
                self.assertEqual(d.topic.fields[1].statistic_name.name,
                                 'Weight', str(d.topic.fields[1]))
                for field in d.topic.fields:
                    if field in d.statistics:
                        self.assertTrue(d.statistics[field].value is None,
                                        field)
                d.statistics[d.topic.fields[0]].value = 'hello world'
                d.statistics[d.topic.fields[1]].value = 64.5

            with db.session_context() as s:

                # check the diary entry was persisted

                diary = s.query(Topic).filter(Topic.name == 'Diary').one()
                d = s.query(TopicJournal).filter(
                    TopicJournal.topic == diary,
                    TopicJournal.date == '2018-09-29').one()
                s.flush()
                d.populate(log, s)
                self.assertEqual(len(d.topic.fields), 9,
                                 list(enumerate(map(str, d.topic.fields))))
                self.assertEqual(d.topic.fields[0].statistic_name.name,
                                 'Notes')
                self.assertEqual(d.statistics[d.topic.fields[0]].value,
                                 'hello world')
                self.assertEqual(d.topic.fields[1].statistic_name.name,
                                 'Weight')
                self.assertEqual(d.statistics[d.topic.fields[1]].value, 64.5)
                self.assertEqual(d.statistics[d.topic.fields[1]].type,
                                 StatisticJournalType.FLOAT)

            # generate summary stats

            SummaryCalculator(db, schedule='m').run()
            SummaryCalculator(db, schedule='y').run()

            with db.session_context() as s:

                # check the summary stats

                diary = s.query(Topic).filter(Topic.name == 'Diary').one()
                weight = s.query(StatisticJournal).join(StatisticName). \
                    filter(StatisticName.owner == diary, StatisticName.name == 'Weight').one()
                self.assertEqual(weight.value, 64.5)
                self.assertEqual(len(weight.measures), 2, weight.measures)
                self.assertEqual(weight.measures[0].rank, 1)
                self.assertEqual(weight.measures[0].percentile, 100,
                                 weight.measures[0].percentile)
                n = s.query(count(StatisticJournalFloat.id)).scalar()
                self.assertEqual(n, 4, n)
                n = s.query(count(StatisticJournalInteger.id)).scalar()
                self.assertEqual(n, 11, n)
                m_avg = s.query(StatisticJournalFloat).join(StatisticName). \
                    filter(StatisticName.name == 'Avg/Month Weight').one()
                self.assertEqual(m_avg.value, 64.5)
                y_avg = s.query(StatisticJournalFloat).join(StatisticName). \
                    filter(StatisticName.name == 'Avg/Year Weight').one()
                self.assertEqual(y_avg.value, 64.5)
                month = s.query(Interval).filter(
                    Interval.schedule == 'm').one()
                self.assertEqual(month.start, to_date('2018-09-01'),
                                 month.start)
                self.assertEqual(month.finish, to_date('2018-10-01'),
                                 month.finish)

            with db.session_context() as s:

                # delete the diary entry

                diary = s.query(Topic).filter(Topic.name == 'Diary').one()
                d = s.query(TopicJournal).filter(
                    TopicJournal.topic == diary,
                    TopicJournal.date == '2018-09-29').one()
                s.delete(d)

            run('sqlite3 %s ".dump"' % f.name, shell=True)

            with db.session_context() as s:

                # check the delete cascade

                self.assertEqual(s.query(count(TopicJournal.id)).scalar(), 0)
                # this should be zero because the Intervals were automatically deleted
                for source in s.query(Source).all():
                    print(source)
                for journal in s.query(StatisticJournal).all():
                    print(journal)
                self.assertEqual(
                    s.query(count(Source.id)).scalar(), 11,
                    list(map(str,
                             s.query(Source).all())))  # constants
                self.assertEqual(
                    s.query(count(StatisticJournalText.id)).scalar(), 7,
                    s.query(count(StatisticJournalText.id)).scalar())
                self.assertEqual(
                    s.query(count(StatisticJournal.id)).scalar(), 7,
                    s.query(count(StatisticJournal.id)).scalar())
Example #52
async def contractor_list(request):  # noqa: C901 (ignore complexity)
    sort_val = request.query.get('sort')
    sort_col = SORT_OPTIONS.get(sort_val, SORT_OPTIONS['last_updated'])

    pagination, offset = get_pagination(request, 100, 100)

    company = request['company']
    options = company.options or {}
    fields = (
        c.id,
        c.first_name,
        c.last_name,
        c.tag_line,
        c.primary_description,
        c.town,
        c.country,
        c.photo_hash,
    )
    show_labels = options.get('show_labels')
    if show_labels:
        fields += (c.labels, )

    show_stars = options.get('show_stars')
    if show_stars:
        fields += (c.review_rating, )

    show_hours_reviewed = options.get('show_hours_reviewed')
    if show_hours_reviewed:
        fields += (c.review_duration, )

    where = (c.company == company.id, )

    subject_filter = get_arg(request, 'subject')
    qual_level_filter = get_arg(request, 'qual_level')

    select_from = None
    if subject_filter or qual_level_filter:
        select_from = sa_contractors.join(sa_con_skills)
        if subject_filter:
            select_from = select_from.join(sa_subjects)
            where += (sa_subjects.c.id == subject_filter, )
        if qual_level_filter:
            select_from = select_from.join(sa_qual_levels)
            where += (sa_qual_levels.c.id == qual_level_filter, )

    labels_filter = request.query.getall('label', [])
    labels_exclude_filter = request.query.getall('label_exclude', [])
    if labels_filter:
        where += (c.labels.contains(cast(labels_filter, ARRAY(String(255)))), )
    if labels_exclude_filter:
        where += (or_(
            ~c.labels.overlap(cast(labels_exclude_filter, ARRAY(String(255)))),
            c.labels.is_(None)), )

    location = await geocode(request)
    inc_distance = None
    if location:
        if location.get('error'):
            return json_response(
                request,
                location=location,
                results=[],
                count=0,
            )
        max_distance = get_arg(request, 'max_distance', default=80_000)
        inc_distance = True
        request_loc = func.ll_to_earth(location['lat'], location['lng'])
        con_loc = func.ll_to_earth(c.latitude, c.longitude)
        distance_func = func.earth_distance(request_loc, con_loc)
        where += (distance_func < max_distance, )
        fields += (distance_func.label('distance'), )
        sort_col = distance_func

    distinct_cols = sort_col, c.id
    if sort_col == c.review_rating:
        sort_on = (c.review_rating.desc().nullslast(),
                   c.review_duration.desc().nullslast(), c.id)
        distinct_cols = c.review_rating, c.review_duration, c.id
    elif sort_col == c.last_updated:
        sort_on = sort_col.desc(), c.id
    else:
        sort_on = sort_col.asc(), c.id

    q_iter = (select(fields).where(and_(*where)).order_by(*sort_on).distinct(
        *distinct_cols).offset(offset).limit(pagination))
    q_count = select([sql_f.count(distinct(c.id))]).where(and_(*where))
    if select_from is not None:
        q_iter = q_iter.select_from(select_from)
        q_count = q_count.select_from(select_from)

    results = []
    name_display = company.name_display
    conn = await request['conn_manager'].get_connection()
    async for row in conn.execute(q_iter):
        name = _get_name(name_display, row)
        con = dict(
            id=row.id,
            url=route_url(request,
                          'contractor-get',
                          company=company.public_key,
                          id=row.id),
            link=f'{row.id}-{slugify(name)}',
            name=name,
            tag_line=row.tag_line,
            primary_description=row.primary_description,
            town=row.town,
            country=row.country,
            photo=_photo_url(request, row, True),
            distance=inc_distance and int(row.distance),
        )
        if show_labels:
            con['labels'] = row.labels or []
        if show_stars:
            con['review_rating'] = row.review_rating
        if show_hours_reviewed:
            con['review_duration'] = row.review_duration
        results.append(con)

    cur_count = await conn.execute(q_count)
    return json_response(
        request,
        location=location,
        results=results,
        count=(await cur_count.first())[0],
    )
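# The distance filter above relies on PostgreSQL's cube/earthdistance extension:
# ll_to_earth(lat, lng) maps coordinates to a point on the earth's surface and
# earth_distance(a, b) returns the great-circle distance in metres, so
# `distance_func < max_distance` keeps contractors within max_distance metres
# (80 km by default).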
Example #53
def getMediaCounts():
    return db.session.query(CollectionMedia.collection_id,
                            func.count(CollectionMedia.media_id)).group_by(
                                CollectionMedia.collection_id).all()
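# A small usage sketch (the caller is hypothetical): the query yields
# (collection_id, media_count) tuples, which convert naturally into a lookup dict.
#
#   media_counts = dict(getMediaCounts())
#   n_media = media_counts.get(some_collection_id, 0)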
Example #54
    def get_all_study_summaries(self) -> List[StudySummary]:

        with _create_scoped_session(self.scoped_session) as session:
            summarized_trial = (
                session.query(
                    models.TrialModel.study_id,
                    functions.min(models.TrialModel.datetime_start).label("datetime_start"),
                    functions.count(models.TrialModel.trial_id).label("n_trial"),
                )
                .group_by(models.TrialModel.study_id)
                .with_labels()
                .subquery()
            )
            study_summary_stmt = session.query(
                models.StudyModel.study_id,
                models.StudyModel.study_name,
                summarized_trial.c.datetime_start,
                functions.coalesce(summarized_trial.c.n_trial, 0).label("n_trial"),
            ).select_from(orm.outerjoin(models.StudyModel, summarized_trial))

            study_summary = study_summary_stmt.all()

            _directions = defaultdict(list)
            for d in session.query(models.StudyDirectionModel).all():
                _directions[d.study_id].append(d.direction)

            _user_attrs = defaultdict(list)
            for a in session.query(models.StudyUserAttributeModel).all():
                _user_attrs[a.study_id].append(a)

            _system_attrs = defaultdict(list)
            for a in session.query(models.StudySystemAttributeModel).all():
                _system_attrs[a.study_id].append(a)

            study_summaries = []
            for study in study_summary:
                directions = _directions[study.study_id]
                best_trial: Optional[models.TrialModel] = None
                try:
                    if len(directions) > 1:
                        raise ValueError
                    elif directions[0] == StudyDirection.MAXIMIZE:
                        best_trial = models.TrialModel.find_max_value_trial(
                            study.study_id, 0, session
                        )
                    else:
                        best_trial = models.TrialModel.find_min_value_trial(
                            study.study_id, 0, session
                        )
                except ValueError:
                    best_trial_frozen: Optional[FrozenTrial] = None
                if best_trial:
                    value = models.TrialValueModel.find_by_trial_and_objective(
                        best_trial, 0, session
                    )
                    assert value
                    params = (
                        session.query(
                            models.TrialParamModel.param_name,
                            models.TrialParamModel.param_value,
                            models.TrialParamModel.distribution_json,
                        )
                        .filter(models.TrialParamModel.trial_id == best_trial.trial_id)
                        .all()
                    )
                    param_dict = {}
                    param_distributions = {}
                    for param in params:
                        distribution = distributions.json_to_distribution(param.distribution_json)
                        param_dict[param.param_name] = distribution.to_external_repr(
                            param.param_value
                        )
                        param_distributions[param.param_name] = distribution
                    user_attrs = models.TrialUserAttributeModel.where_trial_id(
                        best_trial.trial_id, session
                    )
                    system_attrs = models.TrialSystemAttributeModel.where_trial_id(
                        best_trial.trial_id, session
                    )
                    intermediate = models.TrialIntermediateValueModel.where_trial_id(
                        best_trial.trial_id, session
                    )
                    best_trial_frozen = FrozenTrial(
                        best_trial.number,
                        TrialState.COMPLETE,
                        value.value,
                        best_trial.datetime_start,
                        best_trial.datetime_complete,
                        param_dict,
                        param_distributions,
                        {i.key: json.loads(i.value_json) for i in user_attrs},
                        {i.key: json.loads(i.value_json) for i in system_attrs},
                        {value.step: value.intermediate_value for value in intermediate},
                        best_trial.trial_id,
                    )
                user_attrs = _user_attrs.get(study.study_id, [])
                system_attrs = _system_attrs.get(study.study_id, [])
                study_summaries.append(
                    StudySummary(
                        study_name=study.study_name,
                        direction=None,
                        directions=directions,
                        best_trial=best_trial_frozen,
                        user_attrs={i.key: json.loads(i.value_json) for i in user_attrs},
                        system_attrs={i.key: json.loads(i.value_json) for i in system_attrs},
                        n_trials=study.n_trial,
                        datetime_start=study.datetime_start,
                        study_id=study.study_id,
                    )
                )

        return study_summaries
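For context, a hedged usage sketch of the public API this method backs: with an RDB storage URL, optuna.get_all_study_summaries() ends up calling the storage method shown above. The storage URL, study name and objective below are invented.

import optuna

storage = "sqlite:///example.db"  # hypothetical RDB storage URL
study = optuna.create_study(study_name="demo", storage=storage, load_if_exists=True)
study.optimize(lambda trial: (trial.suggest_float("x", -10, 10) - 2) ** 2, n_trials=5)

for summary in optuna.get_all_study_summaries(storage=storage):
    print(summary.study_name, summary.n_trials, summary.datetime_start)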
Example #55
0
def _search_with_args(sess, ret_type, parameters):
    u'''
    Search by return type and argument types.
    '''

    u'''
    -- Roughly, this is the SQL we want to generate:
    SELECT method.id, method.name, method.fqname FROM methods
      JOIN types rtype on methods.return_type = rtype.id
      JOIN methodargs marg on methods.id = marg.method_id
      JOIN types atype on marg.type = atype.id
    WHERE
      method.argcount = ${len(parameters)} AND
      rtype.name = ${rtype} AND
      methodargs.order = ${paramidx} AND
      atype.name = ${parameters[paramidx]}
      ...
    GROUP BY method.id
    HAVING
      COUNT(*) = ${len(parameters)}
    '''

    method = tables.Method.__table__
    marg = tables.MethodArg.__table__
    rtype = tables.Type.__table__.alias('rtype')
    atype = tables.Type.__table__.alias('atype')

    joined = method.join(marg, method.c.id == marg.c.method_id)
    joined = joined.join(atype, marg.c.type == atype.c.id)
    joined = joined.join(rtype, method.c.return_type == rtype.c.id)

    def make_param_query(idx, param):

        if param == '*':
            return marg.c.order == idx

        return sql.and_(marg.c.order == idx,
                        atype.c.name == param)

    argquery = sql.or_(*[make_param_query(idx, param)
                         for idx, param in enumerate(parameters)])

    if ret_type:
        where = sql.and_(method.c.argcount == len(parameters),
                         rtype.c.name == ret_type,
                         argquery)
    else:
        where = sql.and_(method.c.argcount == len(parameters),
                         argquery)


    # XXX: this only works on SQLite.
    # Ideally this would be func.group_concat(methodargs.order + ':' + atype.c.name),
    # but the generated SQL came out broken, so we fell back to a raw string.
    tmpl = '''group_concat(methodargs.`order` || ':' || {0}) as {1}'''

    args = tmpl.format(atype.c.name, 'name')
    fqargs = tmpl.format(atype.c.fqname, 'fqname')

    columns = [method.c.id, method.c.name, method.c.fqname, method.c.return_type, method.c.modifiers,
               rtype.c.name, rtype.c.fqname, args, fqargs]

    query = sql.select(columns, where, joined, use_labels=True)
    query = query.group_by(method.c.id).having(functions.count() == len(parameters)).order_by(method.c.fqname)

    results = sess.execute(query)


    def make_args(names, fqnames):
        names = dict(x.split(':') for x in  names.split(','))
        fqnames = dict(x.split(':') for x in  fqnames.split(','))

        return [dict(name=names[idx],
                     fully_qualified=fqnames[idx])
                for idx in map(str, range(len(parameters)))]


    def make_dict(r):
        return dict(name=r[method.c.name],
                    fully_qualified=r[method.c.fqname],
                    return_type=dict(name=r[rtype.c.name],
                                     fully_qualified=r[rtype.c.fqname]),
                    args=make_args(r['name'], r['fqname']),
                    modifiers=r[method.c.modifiers])


    return [make_dict(x) for x in results]
Example #56
0
def report(task_id):
    # Get the filter parameters
    search_vul_type = request.args.get("search_vul_type", None)
    search_rule = request.args.get("search_rule", None)
    search_level = request.args.get("search_level", None)
    # Current page number, defaults to the first page
    page = int(request.args.get("page", 1))

    # Check whether the task id exists
    task_info = CobraTaskInfo.query.filter_by(id=task_id).first()
    if not task_info:
        return jsonify(status="4004", msg="report id not found.")

    # Get the task info
    repository = task_info.target
    task_created_at = task_info.created_at
    time_consume = task_info.time_consume
    time_start = task_info.time_start
    time_end = task_info.time_end
    files = task_info.file_count
    code_number = task_info.code_number
    if code_number is None or code_number == 0:
        code_number = u"统计中..."
    else:
        code_number = common.convert_number(code_number)

    # Convert the raw timestamps into readable H:M:S strings
    time_start = time.strftime("%H:%M:%S", time.localtime(time_start))
    time_end = time.strftime("%H:%M:%S", time.localtime(time_end))

    # Get the project info
    project = CobraProjects.query.filter_by(repository=repository).first()
    if project is None:
        project_name = repository
        author = 'Anonymous'
        project_description = 'Compress Project'
        project_framework = 'Unknown Framework'
        project_url = 'Unknown URL'
    else:
        project_name = project.name
        author = project.author
        project_description = project.remark
        project_framework = project.framework
        project_url = project.url

    # Get the total number of vulnerabilities
    scan_results = CobraResults.query.filter_by(task_id=task_id).all()
    total_vul_count = len(scan_results)

    # Get the vulnerability types that occurred
    res = db.session.query(count().label("vul_number"), CobraVuls.name).filter(
        and_(
            CobraResults.task_id == task_id,
            CobraResults.rule_id == CobraRules.id,
            CobraVuls.id == CobraRules.vul_id,
        )
    ).group_by(CobraVuls.name).all()
    # Used to populate the filter drop-down list
    select_vul_type = list()
    # Store the count for each vulnerability type
    chart_vuls_number = list()
    for r in res:
        select_vul_type.append(r[1])
        chart_vuls_number.append({"vuls_name": r[1], "vuls_number": r[0]})

    # Get the triggered rule types
    res = db.session.query(CobraRules.description).filter(
        and_(
            CobraResults.task_id == task_id,
            CobraResults.rule_id == CobraRules.id,
            CobraVuls.id == CobraRules.vul_id
        )
    ).group_by(CobraRules.description).all()
    select_rule_type = list()
    for r in res:
        select_rule_type.append(r[0])

    # Count the vulnerabilities per severity level
    res = db.session.query(count().label('vuln_number'), CobraRules.level).filter(
        and_(
            CobraResults.task_id == task_id,
            CobraResults.rule_id == CobraRules.id,
            CobraVuls.id == CobraRules.vul_id,
        )
    ).group_by(CobraRules.level).all()
    low_amount = medium_amount = high_amount = unknown_amount = 0
    for every_level in res:
        """
        低危:1
        中危:2
        高危:3
        未定义:其他值
        """
        if every_level[1] == 1:
            low_amount = every_level[0]
        elif every_level[1] == 2:
            medium_amount = every_level[0]
        elif every_level[1] == 3:
            high_amount = every_level[0]
        else:
            unknown_amount = every_level[0]

    # Retrieve all vulnerability details
    filter_group = (
        CobraResults.task_id == task_id,
        CobraResults.rule_id == CobraRules.id,
        CobraVuls.id == CobraRules.vul_id,
    )

    # Add SQL conditions based on the incoming filter parameters
    if search_vul_type is not None and search_vul_type != "all":
        filter_group += (CobraVuls.name == search_vul_type,)
    if search_rule is not None and search_rule != "all":
        filter_group += (CobraRules.description == search_rule,)
    if search_level is not None and search_level != "all":
        filter_group += (CobraRules.level == search_level,)

    # Build the SQL query
    all_scan_results = db.session.query(
        CobraResults.file, CobraResults.line, CobraResults.code, CobraRules.description, CobraRules.level,
        CobraRules.regex_location, CobraRules.regex_repair, CobraRules.repair, CobraVuls.name
    ).filter(
        *filter_group
    )
    page_size = 5
    total_number = all_scan_results.all()
    total_pages = len(total_number) / page_size + 1
    all_scan_results = all_scan_results.limit(page_size).offset((page - 1) * page_size).all()

    # Process the vulnerability details
    vulnerabilities = list()
    map_level = ["未定义", "低危", "中危", "高危"]
    map_color = ["#555", "black", "orange", "red"]
    current_url = ''
    for result in all_scan_results:

        # Build the per-result data dict
        data_dict = dict()
        data_dict["file"] = result[0]
        data_dict["line"] = result[1]
        data_dict["code"] = result[2]
        data_dict["rule"] = result[3]
        data_dict["level"] = map_level[result[4]]
        data_dict["color"] = map_color[result[4]]
        data_dict["repair"] = result[7]
        data_dict['verify'] = ''
        if project_framework != '':
            for rule in detection.Detection().rules:
                if rule['name'] == project_framework:
                    if 'public' in rule:
                        if result.file[:len(rule['public'])] == rule['public']:
                            data_dict['verify'] = project_url + result.file.replace(rule['public'], '')

        # Check whether this vul_type already exists in vulnerabilities:
        # if it does, append to its data list,
        # otherwise create a new entry below
        found = False
        for v in vulnerabilities:
            if v["vul_type"] == result[-1]:
                # Append directly
                v["data"].append(data_dict)
                # Flip the found flag
                found = True
                break
        # Not found: create a new group
        if not found:
            temp_dict = dict(vul_type=result[-1], data=list())
            temp_dict["data"].append(data_dict)
            vulnerabilities.append(temp_dict)

        current_url = request.url.replace("&page={}".format(page), "").replace("page={}".format(page), "")
        if "?" not in current_url:
            current_url += "?"

    data = {
        'id': int(task_id),
        'project_name': project_name,
        'project_repository': repository,
        'project_description': project_description,
        'project_url': project_url,
        'project_framework': project_framework,
        'author': author,
        'task_created_at': task_created_at,
        'time_consume': common.convert_time(time_consume),
        'time_start': time_start,
        'time_end': time_end,
        'files': common.convert_number(files),
        'code_number': code_number,
        'vul_count': common.convert_number(total_vul_count),
        'vulnerabilities': vulnerabilities,
        "select_vul_type": select_vul_type,
        "select_rule_type": select_rule_type,
        "chart_vuls_number": chart_vuls_number,
        "current_page": page,
        "total_pages": total_pages,
        "filter_vul_number": len(total_number),
        "current_url": current_url,
        'amount': {
            'h': high_amount,
            'm': medium_amount,
            'l': low_amount,
            'u': unknown_amount
        },
    }
    return render_template('report.html', data=data)
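One detail worth flagging in the view above: total_pages is computed as floor division plus one, which yields an extra empty page whenever the result count is an exact multiple of page_size. A small ceiling-division sketch (plain arithmetic, no project-specific code) avoids that:

def page_count(total_results, page_size=5):
    # Ceiling division: 0 -> 0 pages, 1..5 -> 1 page, 6..10 -> 2 pages, ...
    return (total_results + page_size - 1) // page_size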
Example #57
0
class HighlightTag(Base):
    __tablename__ = 'highlight_tags'

    highlight_id = Column(Integer, ForeignKey('highlights.id',
                                              ondelete='CASCADE'),
                          primary_key=True)
    highlight = relationship('Highlight')
    tag_id = Column(Integer, ForeignKey('tags.id',
                                        ondelete='CASCADE'),
                    primary_key=True)
    tag = relationship('Tag')


Tag.highlights_count = column_property(
    select(
        [functions.count(HighlightTag.highlight_id)],
    ).where(
        HighlightTag.tag_id == Tag.id,
    ).correlate_except(HighlightTag)
)
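A hedged usage sketch for the column_property above: highlights_count becomes a correlated-subquery column on Tag, so it can be loaded and ordered on like a regular attribute (session handling is assumed).

# Hypothetical helper: tags ordered by how many highlights use them.
def tags_by_usage(session):
    return session.query(Tag).order_by(Tag.highlights_count.desc()).all()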


def connect(db_url):
    """Connect to the database using an environment variable.
    """
    logger.info("Connecting to SQL database %r", db_url)
    kwargs = {}
    if db_url.startswith('sqlite:'):
        kwargs['connect_args'] = {'check_same_thread': False}
    engine = create_engine(db_url, **kwargs)
    # logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
Example #58
0
def _samp_compute_up(t, s, **kwargs):
    if t.n is not None:
        limit = t.n
    else:
        limit = select([safuncs.count() * t.frac]).as_scalar()
    return s.order_by(safuncs.random()).limit(limit)
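The trick in this compute_up handler is that, when only a fraction is given, the LIMIT is itself a scalar subquery: count(*) * frac. Below is a standalone, heavily hedged sketch of the same pattern in plain SQLAlchemy; the events table is invented, random() is spelled differently on some backends, and a fractional limit may need an explicit integer cast depending on the database.

# Hypothetical ~10% random sample expressed with a scalar-subquery LIMIT.
from sqlalchemy import Integer, cast, func, select
from sqlalchemy.sql import column, table

events = table('events', column('id'), column('payload'))

# LIMIT (SELECT count(*) * 0.1 FROM events)
limit_expr = select([cast(func.count() * 0.1, Integer)]).select_from(events).as_scalar()
sample_q = select([events]).order_by(func.random()).limit(limit_expr)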
Example #59
0
    def get_all_study_summaries(self) -> List[StudySummary]:

        session = self.scoped_session()

        summarized_trial = (session.query(
            models.TrialModel.study_id,
            functions.min(
                models.TrialModel.datetime_start).label("datetime_start"),
            functions.count(models.TrialModel.trial_id).label("n_trial"),
        ).group_by(models.TrialModel.study_id).with_labels().subquery())
        study_summary_stmt = session.query(
            models.StudyModel.study_id,
            models.StudyModel.study_name,
            models.StudyModel.direction,
            summarized_trial.c.datetime_start,
            functions.coalesce(summarized_trial.c.n_trial, 0).label("n_trial"),
        ).select_from(orm.outerjoin(models.StudyModel, summarized_trial))

        study_summary = study_summary_stmt.all()
        study_summaries = []
        for study in study_summary:
            best_trial: Optional[models.TrialModel] = None
            try:
                if study.direction == StudyDirection.MAXIMIZE:
                    best_trial = models.TrialModel.find_max_value_trial(
                        study.study_id, session)
                else:
                    best_trial = models.TrialModel.find_min_value_trial(
                        study.study_id, session)
            except ValueError:
                best_trial_frozen: Optional[FrozenTrial] = None
            if best_trial:
                params = (session.query(
                    models.TrialParamModel.param_name,
                    models.TrialParamModel.param_value,
                    models.TrialParamModel.distribution_json,
                ).filter(models.TrialParamModel.trial_id ==
                         best_trial.trial_id).all())
                param_dict = {}
                param_distributions = {}
                for param in params:
                    distribution = distributions.json_to_distribution(
                        param.distribution_json)
                    param_dict[
                        param.param_name] = distribution.to_external_repr(
                            param.param_value)
                    param_distributions[param.param_name] = distribution
                user_attrs = session.query(
                    models.TrialUserAttributeModel).filter(
                        models.TrialUserAttributeModel.trial_id ==
                        best_trial.trial_id)
                system_attrs = session.query(
                    models.TrialSystemAttributeModel).filter(
                        models.TrialSystemAttributeModel.trial_id ==
                        best_trial.trial_id)
                intermediate = session.query(models.TrialValueModel).filter(
                    models.TrialValueModel.trial_id == best_trial.trial_id)
                best_trial_frozen = FrozenTrial(
                    best_trial.number,
                    TrialState.COMPLETE,
                    best_trial.value,
                    best_trial.datetime_start,
                    best_trial.datetime_complete,
                    param_dict,
                    param_distributions,
                    {i.key: json.loads(i.value_json)
                     for i in user_attrs},
                    {i.key: json.loads(i.value_json)
                     for i in system_attrs},
                    {value.step: value.value
                     for value in intermediate},
                    best_trial.trial_id,
                )
            user_attrs = session.query(models.StudyUserAttributeModel).filter(
                models.StudyUserAttributeModel.study_id == study.study_id)
            system_attrs = session.query(
                models.StudySystemAttributeModel).filter(
                    models.StudySystemAttributeModel.study_id ==
                    study.study_id)
            study_summaries.append(
                StudySummary(
                    study_name=study.study_name,
                    direction=study.direction,
                    best_trial=best_trial_frozen,
                    user_attrs={
                        i.key: json.loads(i.value_json)
                        for i in user_attrs
                    },
                    system_attrs={
                        i.key: json.loads(i.value_json)
                        for i in system_attrs
                    },
                    n_trials=study.n_trial,
                    datetime_start=study.datetime_start,
                    study_id=study.study_id,
                ))

        # Terminate transaction explicitly to avoid connection timeout during transaction.
        self._commit(session)

        return study_summaries
Example #60
0
async def check_if_user_exist(session, email):
    # Combine filters with a SQL conjunction (``&`` / ``and_``); Python's ``and``
    # would truth-test the first clause instead of building an AND in SQL.
    user = await session.execute(
        select(count(User.id)).where((User.email == email) & User.is_active))
    return bool(user.scalars().first())
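Finally, a hedged usage sketch for the helper above, assuming SQLAlchemy 1.4+ with its asyncio extension and an asyncpg driver; the connection URL and e-mail address are invented.

import asyncio
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine

async def main():
    engine = create_async_engine("postgresql+asyncpg://user:pass@localhost/app")
    async with AsyncSession(engine) as session:
        print(await check_if_user_exist(session, "alice@example.com"))

asyncio.run(main())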