Example #1
def create_country_stats(session, date, logger=None):
    if logger is None:
        logger = app.logger

    (start, end) = date_to_timestamps(date)

    # First kill the stats for the selected date
    deleted_counter = session.query(CountryStats).filter(
        CountryStats.date == date).delete()

    country_stats = (session.query(
        literal(date), Country.gid,
        func.count(AircraftBeacon.timestamp).label("aircraft_beacon_count"),
        func.count(func.distinct(
            AircraftBeacon.receiver_id)).label("device_count")).filter(
                between(AircraftBeacon.timestamp, start, end)).filter(
                    func.st_contains(Country.geom,
                                     AircraftBeacon.location)).group_by(
                                         Country.gid).subquery())

    # And insert them
    ins = insert(CountryStats).from_select([
        CountryStats.date, CountryStats.country_id,
        CountryStats.aircraft_beacon_count, CountryStats.device_count
    ], country_stats)
    res = session.execute(ins)
    insert_counter = res.rowcount
    session.commit()
Example #2
    def _generate_monthly_uniques(self, table, tables):
        idx = tables.index(table)

        # Join them all
        at = union_all(*[
            select([tbl]).where(tbl.c.message == "Ack")
            for tbl in tables[idx-29:idx+1]
        ])

        # Get uniques
        s = select([
            func.count(func.distinct(at.c.uaid_hash)).
            label("unique_count"),
            at.c.browser_os,
            at.c.browser_version,
        ]).\
            group_by(at.c.browser_os, at.c.browser_version)
        results = self._conn.execute(s).fetchall()
        if not results:
            return

        # Determine the date for this entry
        tname = table.name
        date_parts = [tname[-8:-4], tname[-4:-2], tname[-2:]]
        insert_date = "-".join(date_parts)

        self._conn.execute(monthly_rollup.insert(), [
            dict(date=insert_date,
                 count=x.unique_count,
                 browser_os=x.browser_os,
                 browser_version=x.browser_version)
            for x in results
        ])
Example #3
    def _filter_events(klass,
                       events,
                       query=None,
                       categories=None,
                       tags=None,
                       flags=None):
        query_tags = None
        if query:
            tags_matching_query = Tag.query.filter(
                Tag.tag_name.ilike("{}%".format(query)))
            query_tags = {t.tag_name for t in tags_matching_query}

            if query_tags:
                if tags is None:
                    tags = query_tags
                else:
                    tags |= query_tags
            else:
                events = klass._filter_events_by_query(events=events,
                                                       query=query)

        if tags or categories:
            # Keep only tags that actually exist in the Tag table
            valid_tags = db_session.query(func.distinct(Tag.tag_name)).filter(
                Tag.tag_name.in_(tags))
            tags = set(t[0] for t in valid_tags)

            events = klass._filter_events_by_tags(events,
                                                  tags=tags,
                                                  categories=categories)

        if flags:
            events = klass._filter_events_by_flags(events, flags=flags)

        return events, tags
Example #4
    def __init__(self, doc_ids):
        # we use these to filter our queries, rather than trying to pull
        # complex filter logic into our view queries
        self.doc_ids = doc_ids
        self.formats = {}

        # map from a score name to its row in the score sheet
        self.score_row = {}
        self.n_columns = 0

        # collect media headings
        medium_ids = self.filter(db.session.query(func.distinct(Document.medium_id)))
        self.media = Medium.query\
            .filter(Medium.id.in_(medium_ids))\
            .order_by(Medium.name)\
            .all()

        self.n_columns = len(self.media)
        self.score_col_start = 3

        # how nested are the ratings?
        def depth(ratings, deep=1):
            for rating in ratings:
                if len(rating) < 3:
                    yield deep
                else:
                    yield max(depth(rating[2], deep + 1))

        # the column at which the ratings for each medium starts
        self.rating_col_start = (max(depth(self.ratings)) - 1) * 2
Example #5
    def get_user_count(self, ndays=7):
        """
        Fetch statistics for the last ndays.

        @return {
            date: {
                user_count,
                question_count
            },
            date: {
                user_count,
                question_count
            }...
        }
        """
        results = {}
        start_t = datetime.datetime.now()
        start_t -= datetime.timedelta(days=ndays)

        rows = self._sess.query(func.date(Histories.CreateAt), \
                        func.count(func.distinct(Histories.DeviceId)), \
                        func.count(Histories.id)) \
                   .filter(Histories.CreateAt >= func.date(start_t)) \
                   .group_by(func.date(Histories.CreateAt)) \
                   .all()

        for row in rows:
            (date, user_cnt, q_cnt) = row
            results[date] = {"question_count": q_cnt, "user_count": user_cnt}

        return results
Example #6
def exact_count(query):
    'usable before sa0.5rc4'
    from sqlalchemy.sql import func
    m = mapper_of_query(query)
    #see Query.count()
    return query._col_aggregate(m.primary_key[0],
                                lambda x: func.count(func.distinct(x)))
Example #7
def exact_count(query):
    "usable before sa0.5rc4"
    from sqlalchemy.sql import func

    m = mapper_of_query(query)
    # see Query.count()
    return query._col_aggregate(m.primary_key[0], lambda x: func.count(func.distinct(x)))
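
Both exact_count snippets above lean on the private Query._col_aggregate API that predates SQLAlchemy 0.5rc4. For comparison, here is a minimal sketch of the same COUNT(DISTINCT primary_key) pattern against a modern session; `session` and the mapped `model` argument are hypothetical stand-ins, not taken from the snippets above.

from sqlalchemy import func

def exact_count_modern(session, model):
    # Count distinct primary-key values directly; no private Query API needed.
    pk = model.__mapper__.primary_key[0]
    return session.query(func.count(func.distinct(pk))).scalar()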
Example #8
def generate_summary_data(user):
    ret = []
    user_query = Record.query.filter_by(user=user)
    years = user_query.with_entities(func.distinct(Record.year)).all()
    for year in years:
        months = user_query.filter_by(year=year[0]).with_entities(
            func.distinct(Record.month)).all()
        for month in months:
            node = {
                "year": year[0],
                "month": month[0],
                "worktime": user_query.filter_by(
                    year=year[0],
                    month=month[0]).with_entities(func.sum(Record.hours)
                                                  ).first()[0]
            }
            ret.append(node)
    return ret
Example #9
def getMinMaxYearByELP(session, COD_ELP, listeInds):
    retour = {"min": 0, "max": 0, "ecart": 0}
    CI = aliased(tables.ConnexionINDSQLITE)
    # Materialize the ordered distinct dates; a Query object is always truthy,
    # so the emptiness check below only works on the resulting list.
    minMaxYear = session.query(func.distinct(CI.DATE_CONN)).filter(CI.SESAME_ETU.in_(listeInds)).order_by(CI.DATE_CONN).all()
    if minMaxYear:
        retour["min"] = int(minMaxYear[0][0].split("/")[0])
        retour["max"] = int(minMaxYear[-1][0].split("/")[0])
        retour["ecart"] = retour["max"] - retour["min"]
    return retour
Example #10
def count_trackback_pings(paper_id: str) -> int:
    """Count trackback pings for a particular document (paper_id)."""
    row = db.session.query(
            func.count(func.distinct(TrackbackPing.url)).label('num_pings')
          ).filter(TrackbackPing.document_id == Document.document_id) \
           .filter(Document.paper_id == paper_id) \
           .filter(TrackbackPing.status == 'accepted').first()

    return int(row.num_pings)
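
The query above joins TrackbackPing to Document implicitly by filtering on columns of both tables. Below is a sketch of the same count written with an explicit JOIN and scalar(), assuming the same models and db.session as the snippet.

def count_trackback_pings_explicit_join(paper_id: str) -> int:
    """Same count as above, written with an explicit join condition."""
    num_pings = db.session.query(
            func.count(func.distinct(TrackbackPing.url))
        ).join(Document, TrackbackPing.document_id == Document.document_id) \
         .filter(Document.paper_id == paper_id) \
         .filter(TrackbackPing.status == 'accepted') \
         .scalar()
    return int(num_pings or 0)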
Example #11
def customers(session, date):
    """
    Method returns the number of distinct customers on a given date
    :param session: database connection session
    :param date: date string in YYYY-MM-DD format
    :return: number of distinct customer ids on a given day
    """
    return len(
        session.query(func.distinct(Orders.customer_id))
        .filter(Orders.created_at.contains(date))
        .all()
    )
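
customers() above pulls every distinct customer id into Python just to take len() of the list. When only the number is needed, the counting can be pushed into SQL; a minimal sketch under the same assumptions (the Orders model and session from the snippet):

def customers_counted_in_sql(session, date):
    """Distinct customer count for a given date, computed by the database."""
    return (
        session.query(func.count(func.distinct(Orders.customer_id)))
        .filter(Orders.created_at.contains(date))
        .scalar()
    )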
Example #12
def commissions_order_average(session, date, total):
    """
    :param session: Database connection session
    :param date: Date string in YYYY-MM-DD format
    :param total: The total commissions on the specified date
    :return: Returns the average commission paid per order on a given date
    """
    return (
        total
        / session.query(func.count(func.distinct(Orders.id)))
        .filter(Orders.created_at.contains(date))
        .first()[0]
    )
Example #13
    def count(cls, session, params, conditions=[], distinct=False):
        u"""计数.

        eg: BaseModel.count([BaseModel.id, BaseModel.XXX], [BaseModel.id==2])
            BaseModel.count(BaseModel.id, [BaseModel.id==2], True)
        """
        if distinct:
            if isinstance(params, Iterable) and len(params) >= 2:
                re = session.query(func.count(
                    func.distinct(func.concat(*params))))\
                    .filter(*conditions).one()[0]
            elif isinstance(params, Iterable):
                qp = params[0]
                re = session.query(func.count(
                    func.distinct(qp))).filter(*conditions).one()[0]
            else:
                re = session.query(func.count(
                    func.distinct(params))).filter(*conditions).one()[0]
        else:
            if not isinstance(params, Iterable):
                params = [params]
            re = session.query(*params).filter(*conditions).count()
        return re
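
A hypothetical usage sketch of the classmethod above, passing the session argument that the signature expects; BaseModel.name is a stand-in for any second column.

# Plain row count for a condition
total = BaseModel.count(session, BaseModel.id, [BaseModel.id == 2])

# Distinct count over the concatenation of two columns
distinct_pairs = BaseModel.count(session, [BaseModel.id, BaseModel.name],
                                 [BaseModel.id == 2], distinct=True)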
Example #14
def find_device(current_user):
    device_ids = [
        device_id for device_id, in db.session.query(
            func.distinct(Move.device_id)).join(User).join(Device).filter(
                Device.name != gpx_import.GPX_DEVICE_NAME).filter(
                    Move.user == current_user).all()
    ]

    if not device_ids:
        return None

    assert len(device_ids) == 1
    device_id = device_ids[0]
    device = db.session.query(Device).filter_by(id=device_id).one()
    return device
Example #15
def make_facets(parent_alias, filter_func, parser_func):
    """ Return a set of facets based on the current query string. This
    will also consider filters set for the query, i.e. only show facets
    that match the current set of filters. """
    facets = {}
    for facet in request.args.getlist('facet'):
        parent_obj = parent_alias()
        q = db.session.query()
        facet_count = func.count(func.distinct(parent_obj.id))
        q = q.add_columns(facet_count)
        q = q.order_by(facet_count.desc())
        q = filter_func(q, parent_obj)
        q = parser_func(parent_obj, facet, facet, q)
        facets[facet] = Pager(q, name='facet_%s' % facet,
                              results_converter=results_process)
    return facets
Example #16
def overview():
    if request.method == "GET":
        userid = int(session.get("userid"))
        overview = {}
        # Amount currently invested, maximum annual interest rate, pending returns, and deduplicated count of platforms invested in
        investinfo = db.session.query(
            func.sum(Invest.money).label("investment_money"),
            func.max(Invest.profit).label("investment_max_profit"),
            func.sum(Invest.lucre).label("investment_lucre"),
            func.count(func.distinct(Invest.id)).label("count_p2p")).filter_by(
                user_id=userid, status=0).first()
        # Investments expiring within the next 7 days
        count_expire = db.session.query(
            func.count(Invest.id).label("expiring_invest")).filter(
                Invest.user_id == userid, Invest.status == 0,
                Invest.end_time >=
                (datetime.now() - timedelta(days=7))).first()
        # Investments that have matured and need confirmation
        expire_invest = db.session.query(
            func.count(Invest.id).label("expire_invest")).filter(
                Invest.user_id == userid, Invest.status == 1,
                Invest.end_time >= datetime.now()).first()
        # Withdrawal and deposit amounts
        in_money = db.session.query(
            func.sum(BillFlow.money).label("in_money")).filter_by(
                user_id=userid, status=1, type=0).first()
        out_money = db.session.query(
            func.sum(BillFlow.money).label("out_money")).filter_by(
                user_id=userid, status=1, type=1).first()
        # Most recent login time and IP
        loginfo = db.session.query(Loginlog.ip, Loginlog.addtime).filter_by(
            user_id=userid).order_by(Loginlog.id.desc()).first()
        print(in_money.in_money)
        overview["last_login_ip"] = loginfo.ip
        overview["last_login_time"] = str(loginfo.addtime)
        overview["in_money"] = float(in_money.in_money)
        overview["out_money"] = float(out_money.out_money)
        overview["investment_money"] = float(investinfo.investment_money)
        overview["investment_max_profit"] = float(
            investinfo.investment_max_profit)
        overview["investment_lucre"] = float(investinfo.investment_lucre)
        overview["count_p2p"] = int(investinfo.count_p2p)
        overview["expiring_invest"] = int(count_expire.expiring_invest)
        overview["expire_invest"] = int(expire_invest.expire_invest)
        return jsonify(overview)
Example #17
    def _query_weekly_average(self, table, tables):
        # First see if we can find 6 days prior for a full week
        idx = tables.index(table)

        # For Python list math, 6 has 6 numbers before it as zero index
        # based, so 6 or larger is needed
        if idx < 6:
            return None

        # Get our weekly set together
        # Note that we add one to idx since list splicing needs one higher than
        # the index for right-side inclusive
        week_tables = union_all(*[
            select([tbl]).where(tbl.c.message == "Ack")
            for tbl in tables[idx-6:idx+1]
        ])

        # Calculate channels per user for the past week
        chans_per_user = select([
            week_tables.c.uaid_hash,
            func.count(func.distinct(week_tables.c.channel_id)).label("count")
        ]).\
            group_by(week_tables.c.uaid_hash)

        # Rank them into ntiles
        ranked = select([
            chans_per_user.c.uaid_hash,
            chans_per_user.c.count,
            func.ntile(100).over(order_by=text("count ASC")).label("rank"),
        ])

        # Remove the bottom/upper 5%, get sum/count for avg
        weekly_channels_stats = select([
            func.sum(ranked.c.count),
            func.count(ranked.c.uaid_hash),
        ]).\
            where(ranked.c.rank > 5).\
            where(ranked.c.rank <= 95)
        sums, count = self._conn.execute(weekly_channels_stats).fetchone()
        weekly_avg = Decimal(sums) / Decimal(count)
        return weekly_avg
Example #18
def getLeaderboard():
    leaderboard = []
    sortedPlayers = Players.query.filter(Players.Ranking != 0).order_by(Players.Ranking).all()
    for player in sortedPlayers:
        row = {}
        row["First Name"] = player.FirstName
        row["Last Name"] = player.LastName
        row["Ranking"] = player.Ranking
        row["Series Wins"] = player.SeriesWins
        row["Game Wins"] = player.GameWins
        row["Game Losses"] = player.TotalGamesPlayed - player.GameWins
        row["Points Scored"] = player.TotalPoints
        row["Shutouts"] = player.Shutouts 
        row["Game Win %"] = round(100.0*player.GameWins / player.TotalGamesPlayed,2)
        numSeriesPlayed = db.session.query(func.distinct(Series.Id)).join(History).filter(History.PlayerId == player.Id).count()
        row["Series Win %"] = round(100.0*player.SeriesWins / numSeriesPlayed,2)
        row["Avg Points/Game"] = round(player.TotalPoints / player.TotalGamesPlayed,2)
        sumWinMargin = db.session.query(func.sum(Games.WinMargin)).join(History).filter(Games.Winner == History.Side).filter(History.PlayerId == player.Id).scalar()
        row["Avg Win Margin"] = round(sumWinMargin / player.TotalGamesPlayed,2)
        leaderboard.append(row)
    return json.dumps(leaderboard)
Example #19
    def get_events(self,
                   query=None,
                   categories=None,
                   tags=None,
                   cities=None,
                   flags=None,
                   page=1,
                   future_only=False):
        current_user = UserController().current_user
        selected_categories = set(categories.split(',') if categories else [])
        selected_tags = set(tags.split(',') if tags else [])

        events_with_counts = db_session.query(
            Event,
            func.count(func.distinct(
                UserEvent.user_id)).label('ct')).outerjoin(
                    UserEvent, UserEvent.event_id == Event.event_id).group_by(
                        Event.event_id).order_by(desc('ct'))

        if current_user:
            current_user_events_table = alias(current_user.user_events(),
                                              'current_user_events_table')
            events_with_counts = events_with_counts.filter(~Event.event_id.in_(
                db_session.query(
                    current_user_events_table.c.user_events_event_id)))

        results, categories, tags, event_cities, results_table = self._process_events(
            events=events_with_counts,
            cities=cities,
            page=page,
            query=query,
            user=current_user,
            selected_categories=selected_categories,
            selected_tags=selected_tags,
            flags=flags,
            future_only=future_only)

        return results, categories, tags, event_cities
Example #20
    def _generate_daily_uniques(self, table):
        s = select([
            func.count(func.distinct(table.c.uaid_hash)).
            label("unique_count"),
            cast(table.c.timestamp, Date).label("date"),
            table.c.browser_os,
            table.c.browser_version,
        ]).\
            where(table.c.message == "Ack").\
            group_by("date", table.c.browser_os,
                     table.c.browser_version)

        results = self._conn.execute(s).fetchall()
        if not results:
            return

        # Now build the insert for the rollup table, and insert
        self._conn.execute(daily_rollup.insert(), [
            dict(count=x.unique_count,
                 date=x.date,
                 browser_os=x.browser_os,
                 browser_version=x.browser_version)
            for x in results
        ])
Example #21
def update_entries(session, date, logger=None):
    """Create receiver coverage stats for Melissas ognrange."""

    if logger is None:
        logger = app.logger

    logger.info("Compute receiver coverages.")

    (start, end) = date_to_timestamps(date)

    # Filter aircraft beacons
    sq = (session.query(
        AircraftBeacon.location_mgrs_short, AircraftBeacon.receiver_id,
        AircraftBeacon.signal_quality, AircraftBeacon.altitude,
        AircraftBeacon.device_id).filter(
            and_(between(AircraftBeacon.timestamp, start,
                         end), AircraftBeacon.location_mgrs_short != null(),
                 AircraftBeacon.receiver_id != null(),
                 AircraftBeacon.device_id != null())).subquery())

    # ... and group them by reduced MGRS, receiver and date
    query = (session.query(
        sq.c.location_mgrs_short,
        sq.c.receiver_id,
        func.cast(date, Date).label("date"),
        func.max(sq.c.signal_quality).label("max_signal_quality"),
        func.min(sq.c.altitude).label("min_altitude"),
        func.max(sq.c.altitude).label("max_altitude"),
        func.count(sq.c.altitude).label("aircraft_beacon_count"),
        func.count(func.distinct(sq.c.device_id)).label("device_count"),
    ).group_by(sq.c.location_mgrs_short, sq.c.receiver_id).subquery())

    # if a receiver coverage entry exists --> update it
    upd = (update(ReceiverCoverage).where(
        and_(
            ReceiverCoverage.location_mgrs_short ==
            query.c.location_mgrs_short,
            ReceiverCoverage.receiver_id == query.c.receiver_id,
            ReceiverCoverage.date == date)).values({
                "max_signal_quality":
                query.c.max_signal_quality,
                "min_altitude":
                query.c.min_altitude,
                "max_altitude":
                query.c.max_altitude,
                "aircraft_beacon_count":
                query.c.aircraft_beacon_count,
                "device_count":
                query.c.device_count,
            }))

    result = session.execute(upd)
    update_counter = result.rowcount
    session.commit()
    logger.debug(
        "Updated receiver coverage entries: {}".format(update_counter))

    # if a receiver coverage entry doesn't exist --> insert it
    new_coverage_entries = session.query(query).filter(~exists().where(
        and_(
            ReceiverCoverage.location_mgrs_short ==
            query.c.location_mgrs_short, ReceiverCoverage.receiver_id ==
            query.c.receiver_id, ReceiverCoverage.date == date)))

    ins = insert(ReceiverCoverage).from_select(
        (
            ReceiverCoverage.location_mgrs_short,
            ReceiverCoverage.receiver_id,
            ReceiverCoverage.date,
            ReceiverCoverage.max_signal_quality,
            ReceiverCoverage.min_altitude,
            ReceiverCoverage.max_altitude,
            ReceiverCoverage.aircraft_beacon_count,
            ReceiverCoverage.device_count,
        ),
        new_coverage_entries,
    )

    result = session.execute(ins)
    insert_counter = result.rowcount
    session.commit()

    finish_message = "ReceiverCoverage: {} inserted, {} updated".format(
        insert_counter, update_counter)
    logger.debug(finish_message)
    return finish_message
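
update_entries() above emulates an upsert with a separate UPDATE-from-select followed by an INSERT-from-select. On PostgreSQL (which the PostGIS/ognrange code elsewhere in these examples suggests) the same effect can be collapsed into one statement; a minimal sketch, assuming SQLAlchemy >= 1.1 and a unique constraint over (location_mgrs_short, receiver_id, date):

from sqlalchemy.dialects.postgresql import insert as pg_insert

def upsert_receiver_coverage(session, rows):
    """rows: iterable of dicts holding the ReceiverCoverage column values."""
    # Assumes a unique constraint on (location_mgrs_short, receiver_id, date).
    stmt = pg_insert(ReceiverCoverage.__table__).values(list(rows))
    stmt = stmt.on_conflict_do_update(
        index_elements=["location_mgrs_short", "receiver_id", "date"],
        set_={
            "max_signal_quality": stmt.excluded.max_signal_quality,
            "min_altitude": stmt.excluded.min_altitude,
            "max_altitude": stmt.excluded.max_altitude,
            "aircraft_beacon_count": stmt.excluded.aircraft_beacon_count,
            "device_count": stmt.excluded.device_count,
        },
    )
    session.execute(stmt)
    session.commit()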
Example #22
def topsku_week_table_js():
    date = datetime.strptime(request.form["sku_date"], "%Y-%m-%d").date()
    hour = int(request.form["hour"])

    weekday = date.weekday()
    start_date = date - relativedelta(weeks=4, weekday=weekday)
    end_date = date

    # if start_date == "" and end_date == "" and date.today().weekday() != 6:
    #     date_today = date.today()
    #     start_date = date_today - relativedelta(weeks=4, weekday=MO(-1))
    #     end_date = start_date + relativedelta(weeks=3, weekday=SU(1))
    # elif start_date == "" and end_date == "" and date.today().weekday() == 6:
    #     date_today = date.today()
    #     start_date = date_today - relativedelta(weeks=4, weekday=MO(-1))
    #     end_date = date_today
    # else:
    #     start_date = datetime.strptime(start_date,"%Y-%m-%d").date() + relativedelta(weekday=MO(-1))
    #     end_date = datetime.strptime(end_date,"%Y-%m-%d").date() + relativedelta(weekday=SU(1))


    brands = db.session.query(func.distinct(top_sku_talendfc.brand))\
                .filter(and_(top_sku_talendfc.txn_date >= start_date,
                                top_sku_talendfc.txn_date <= end_date,
                                func.weekday(top_sku_talendfc.txn_date) == weekday,
                                top_sku_talendfc.processing_hr == hour))
    lookup = db.session.query(top_sku_talendfc.txn_date,top_sku_talendfc.brand,func.sum(top_sku_talendfc.txn_amount))\
                .filter(and_(top_sku_talendfc.txn_date >= start_date,
                                top_sku_talendfc.txn_date <= end_date,
                                func.weekday(top_sku_talendfc.txn_date) == weekday,
                                top_sku_talendfc.processing_hr == hour))\
                .group_by(top_sku_talendfc.txn_date,top_sku_talendfc.brand)

    brands = [b[0] for b in brands] + ["TOTAL"]
    sku_dict = {}

    for l in lookup.all():
        key = l[0].strftime("%Y-%m-%d")
        if key not in sku_dict.keys():
            sku_dict[key] = {
                "brands": dict.fromkeys(brands, None),
            }
            sku_dict[key]["brands"][l[1]] = str(l[2])
            if sku_dict[key]["brands"]["TOTAL"] == None:
                sku_dict[key]["brands"]["TOTAL"] = 0
            sku_dict[key]["brands"]["TOTAL"] += l[2]
        else:
            if sku_dict[key]["brands"]["TOTAL"] == None:
                sku_dict[key]["brands"]["TOTAL"] = 0
            sku_dict[key]["brands"][l[1]] = str(l[2])
            sku_dict[key]["brands"]["TOTAL"] += l[2]
    # for l in lookup.all():
    #     cur_week = ""
    #     if l[1] not in sku_dict.keys():
    #         sku_dict[l[1]] = {
    #             "start_date": None,
    #             "end_date": None,
    #             "brands": dict.fromkeys(brands,None),
    #         }

    #         insert_sku_table(l,sku_dict[l[1]])
    #         aggregate_sku_table(l,sku_dict[l[1]])
    #     else:
    #         insert_sku_table(l,sku_dict[l[1]])
    #         aggregate_sku_table(l,sku_dict[l[1]])

    formatted_data = {"columns": ["Dates"] + brands, "data": []}
    for k in sku_dict.keys():
        sku = dict.fromkeys(formatted_data["columns"])
        sku["Dates"] = k
        for b in sku_dict[k]["brands"].keys():
            sku[b] = str(sku_dict[k]["brands"][b])
        formatted_data["data"].append(sku)
    # for k in sku_dict.keys():
    #     sku = dict.fromkeys(formatted_data["columns"])
    #     sku["Dates"] = str(sku_dict[k]["start_date"]) + " to " + str(sku_dict[k]["end_date"])
    #     for b in sku_dict[k]["brands"].keys():
    #         sku[b] = str(sku_dict[k]["brands"][b])
    #     formatted_data["data"].append(sku)

    # print(formatted_data)
    return jsonify(formatted_data)
Example #23
    def __init__(self,
                 group_strategy,
                 timeseries_unit='day',
                 date_filter_attributes=None):
        self.filterable_attributes = [
            DATE, CUSTOM_ATTRIBUTE, TRANSFER_ACCOUNT, USER
        ]
        self.timeseries_unit = timeseries_unit
        self.date_filter_attributes = date_filter_attributes
        self.metrics = []

        # Timeseries Metrics
        if group_strategy:
            users_created_timeseries_query = group_strategy.build_query_group_by_with_join(db.session.query(func.count(User.id).label('volume'),
                    func.date_trunc(self.timeseries_unit, self.date_filter_attributes[User]).label('date'), group_strategy.group_by_column)\
                    .group_by(func.date_trunc(self.timeseries_unit, self.date_filter_attributes[User])), User)
            aggregated_users_created_query = group_strategy\
                .build_query_group_by_with_join(db.session.query(func.count(User.id).label('volume'), group_strategy.group_by_column), User)
        else:
            users_created_timeseries_query = db.session.query(func.count(User.id).label('volume'),
                    func.date_trunc(self.timeseries_unit, self.date_filter_attributes[User]).label('date'))\
                    .group_by(func.date_trunc(self.timeseries_unit, self.date_filter_attributes[User]))
            aggregated_users_created_query = None
        total_users_created_query = db.session.query(
            func.count(User.id).label('volume'))
        self.metrics.append(
            metric.Metric(
                metric_name='users_created',
                is_timeseries=True,
                query=users_created_timeseries_query,
                aggregated_query=aggregated_users_created_query,
                total_query=total_users_created_query,
                object_model=User,
                #stock_filters=[filters.beneficiary_filters], # NOTE: Do we still want this filter?
                stock_filters=[],
                query_caching_combinatory_strategy=metrics_cache.SUM_OBJECTS,
                aggregated_query_caching_combinatory_strategy=metrics_cache.
                SUM_OBJECTS,
                total_query_caching_combinatory_strategy=metrics_cache.TALLY,
                filterable_by=self.filterable_attributes,
                query_actions=[FORMAT_TIMESERIES],
                aggregated_query_actions=[FORMAT_AGGREGATE_METRICS],
                total_query_actions=[GET_FIRST],
            ))

        if group_strategy:
            active_users_timeseries_query = group_strategy.build_query_group_by_with_join(db.session.query(func.count(func.distinct(CreditTransfer.sender_user_id)).label('volume'),
                    func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer]).label('date'), group_strategy.group_by_column)\
                    .group_by(func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer])), CreditTransfer)
            aggregated_active_users_query = group_strategy.build_query_group_by_with_join(
                db.session.query(
                    func.count(func.distinct(
                        CreditTransfer.sender_user_id)).label('volume'),
                    group_strategy.group_by_column), CreditTransfer)
        else:
            active_users_timeseries_query = db.session.query(func.count(func.distinct(CreditTransfer.sender_user_id)).label('volume'),
                    func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer]).label('date'))\
                    .group_by(func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer]))
            aggregated_active_users_query = None
        total_active_users_query = db.session.query(
            func.count(func.distinct(
                CreditTransfer.sender_user_id)).label('volume'))
        self.metrics.append(
            metric.Metric(
                metric_name='active_users',
                is_timeseries=True,
                query=active_users_timeseries_query,
                aggregated_query=aggregated_active_users_query,
                total_query=total_active_users_query,
                object_model=CreditTransfer,
                #stock_filters=[filters.beneficiary_filters], # NOTE: Do we still want this filter?
                stock_filters=[],
                query_caching_combinatory_strategy=metrics_cache.SUM_OBJECTS,
                aggregated_query_caching_combinatory_strategy=metrics_cache.
                QUERY_ALL,
                total_query_caching_combinatory_strategy=metrics_cache.
                QUERY_ALL,
                filterable_by=self.filterable_attributes,
                query_actions=[FORMAT_TIMESERIES],
                aggregated_query_actions=[FORMAT_AGGREGATE_METRICS],
                total_query_actions=[GET_FIRST],
            ))

        if group_strategy:
            total_users_timeseries_query = group_strategy.build_query_group_by_with_join(db.session.query(func.count(User.id).label('volume'),
                    func.date_trunc(self.timeseries_unit, self.date_filter_attributes[User]).label('date'), group_strategy.group_by_column)\
                    .group_by(func.date_trunc(self.timeseries_unit, self.date_filter_attributes[User])), User)

        else:
            total_users_timeseries_query = db.session.query(func.count(User.id).label('volume'),
                    func.date_trunc(self.timeseries_unit, self.date_filter_attributes[User]).label('date'))\
                    .group_by(func.date_trunc(self.timeseries_unit, self.date_filter_attributes[User]))
        self.metrics.append(
            metric.Metric(
                metric_name='total_population_cumulative',
                is_timeseries=True,
                query=total_users_timeseries_query,
                total_query=total_users_created_query,
                aggregated_query=aggregated_active_users_query,
                object_model=User,
                stock_filters=[],
                query_caching_combinatory_strategy=metrics_cache.SUM_OBJECTS,
                aggregated_query_caching_combinatory_strategy=metrics_cache.
                SUM_OBJECTS,
                total_query_caching_combinatory_strategy=metrics_cache.TALLY,
                filterable_by=self.filterable_attributes,
                aggregated_query_actions=[FORMAT_AGGREGATE_METRICS],
                total_query_actions=[GET_FIRST],
                query_actions=[
                    ADD_MISSING_DAYS_TO_TODAY, ACCUMULATE_TIMESERIES,
                    FORMAT_TIMESERIES
                ]))
Example #24
def calculate_transfer_stats(total_time_series=False,
                             start_date=None,
                             end_date=None,
                             user_filter={}):
    date_filter = []
    filter_active = False
    if start_date is not None and end_date is not None:
        date_filter.append(CreditTransfer.created >= start_date)
        date_filter.append(CreditTransfer.created <= end_date)
        filter_active = True

    disbursement_filters = [
        CreditTransfer.transfer_status == TransferStatusEnum.COMPLETE,
        CreditTransfer.transfer_type == TransferTypeEnum.PAYMENT,
        CreditTransfer.transfer_subtype == TransferSubTypeEnum.DISBURSEMENT
    ]

    standard_payment_filters = [
        CreditTransfer.transfer_status == TransferStatusEnum.COMPLETE,
        CreditTransfer.transfer_type == TransferTypeEnum.PAYMENT,
        CreditTransfer.transfer_subtype == TransferSubTypeEnum.STANDARD
    ]

    exchanged_filters = [
        CreditTransfer.transfer_status == TransferStatusEnum.COMPLETE,
        CreditTransfer.transfer_type == TransferTypeEnum.EXCHANGE,
        CreditTransfer.token == g.active_organisation.token
    ]

    beneficiary_filters = [User.has_beneficiary_role == True]
    vendor_filters = [User.has_vendor_role == True]

    exhaused_balance_filters = [
        CreditTransfer.transfer_type == TransferTypeEnum.PAYMENT,
        TransferAccount._balance_wei == 0
    ]

    transfer_use_filters = [
        *standard_payment_filters,
        CreditTransfer.transfer_use.isnot(None),
    ]

    # Disable cache if any filters are being used
    disable_cache = False
    if user_filter or date_filter:
        disable_cache = True

    total_distributed = db.session.query(
        func.sum(CreditTransfer.transfer_amount).label('total'))
    total_distributed = apply_filters(total_distributed, user_filter,
                                      CreditTransfer)
    total_distributed = total_distributed.filter(*disbursement_filters).filter(
        *date_filter)
    total_distributed = metrics_cache.execute_with_partial_history_cache(
        'total_distributed',
        total_distributed,
        CreditTransfer,
        metrics_cache.SUM,
        disable_cache=disable_cache)

    total_spent = db.session.query(
        func.sum(CreditTransfer.transfer_amount).label('total'))
    total_spent = apply_filters(total_spent, user_filter, CreditTransfer)
    total_spent = total_spent.filter(*standard_payment_filters).filter(
        *date_filter)
    total_spent = metrics_cache.execute_with_partial_history_cache(
        'total_spent',
        total_spent,
        CreditTransfer,
        metrics_cache.SUM,
        disable_cache=disable_cache)

    total_exchanged = db.session.query(
        func.sum(CreditTransfer.transfer_amount).label('total'))
    total_exchanged = apply_filters(total_exchanged, user_filter,
                                    CreditTransfer)
    total_exchanged = total_exchanged.filter(*exchanged_filters).filter(
        *date_filter)
    total_exchanged = metrics_cache.execute_with_partial_history_cache(
        'total_exchanged',
        total_exchanged,
        CreditTransfer,
        metrics_cache.SUM,
        disable_cache=disable_cache)

    total_beneficiaries = db.session.query(User).filter(*beneficiary_filters)
    total_beneficiaries = metrics_cache.execute_with_partial_history_cache(
        'total_beneficiaries',
        total_beneficiaries,
        CreditTransfer,
        metrics_cache.COUNT,
        disable_cache=disable_cache)

    total_vendors = db.session.query(User).filter(*vendor_filters)
    total_vendors = metrics_cache.execute_with_partial_history_cache(
        'total_vendors',
        total_vendors,
        CreditTransfer,
        metrics_cache.COUNT,
        disable_cache=disable_cache)

    total_users = total_beneficiaries + total_vendors

    has_transferred_count = db.session.query(func.count(func.distinct(CreditTransfer.sender_user_id))
        .label('transfer_count'))\
        .filter(*standard_payment_filters) \
        .filter(*date_filter) \
            .first().transfer_count

    exhausted_balance_count = db.session.query(func.count(func.distinct(
        CreditTransfer.sender_transfer_account_id))
        .label('transfer_count')) \
        .join(CreditTransfer.sender_transfer_account)\
        .filter(*exhaused_balance_filters) \
        .filter(*date_filter) \
            .first().transfer_count

    daily_transaction_volume = db.session.query(
        func.sum(CreditTransfer.transfer_amount).label('volume'),
        func.date_trunc('day', CreditTransfer.created).label('date'))
    daily_transaction_volume = apply_filters(daily_transaction_volume,
                                             user_filter, CreditTransfer)
    daily_transaction_volume = daily_transaction_volume.group_by(func.date_trunc('day', CreditTransfer.created))\
        .filter(*standard_payment_filters) \
        .filter(*date_filter)
    daily_transaction_volume = metrics_cache.execute_with_partial_history_cache(
        'daily_transaction_volume',
        daily_transaction_volume,
        CreditTransfer,
        metrics_cache.SUM_OBJECTS,
        disable_cache=disable_cache)

    daily_disbursement_volume = db.session.query(
        func.sum(CreditTransfer.transfer_amount).label('volume'),
        func.date_trunc('day', CreditTransfer.created).label('date'))
    daily_disbursement_volume = apply_filters(daily_disbursement_volume,
                                              user_filter, CreditTransfer)
    daily_disbursement_volume = daily_disbursement_volume.group_by(func.date_trunc('day', CreditTransfer.created)) \
        .filter(*disbursement_filters) \
        .filter(*date_filter)
    daily_disbursement_volume = metrics_cache.execute_with_partial_history_cache(
        'daily_disbursement_volume',
        daily_disbursement_volume,
        CreditTransfer,
        metrics_cache.SUM_OBJECTS,
        disable_cache=disable_cache)

    transfer_use_breakdown = db.session.query(
        CreditTransfer.transfer_use.cast(JSONB),
        func.count(CreditTransfer.transfer_use))
    transfer_use_breakdown = apply_filters(transfer_use_breakdown, user_filter,
                                           CreditTransfer)
    transfer_use_breakdown = transfer_use_breakdown.filter(*transfer_use_filters) \
        .group_by(CreditTransfer.transfer_use.cast(JSONB)) \
            .all()

    try:
        last_day = daily_transaction_volume[0][1]
        last_day_volume = daily_transaction_volume[0][0]
        transaction_vol_list = [{
            'date': item[1].isoformat(),
            'volume': item[0]
        } for item in daily_transaction_volume]
    except IndexError:  # No transactions
        last_day = datetime.datetime.utcnow()
        last_day_volume = 0
        has_transferred_count = 0
        transaction_vol_list = [{
            'date': datetime.datetime.utcnow().isoformat(),
            'volume': 0
        }]

    try:
        disbursement_vol_list = [{
            'date': item[1].isoformat(),
            'volume': item[0]
        } for item in daily_disbursement_volume]
    except IndexError:
        disbursement_vol_list = [{
            'date': datetime.datetime.utcnow().isoformat(),
            'volume': 0
        }]

    try:
        master_wallet_balance = cached_funds_available()
    except:
        master_wallet_balance = 0

    data = {
        'total_distributed': total_distributed,
        'total_spent': total_spent,
        'total_exchanged': total_exchanged,
        'has_transferred_count': has_transferred_count,
        'zero_balance_count': exhausted_balance_count,
        'total_beneficiaries': total_beneficiaries,
        'total_users': total_users,
        'master_wallet_balance': master_wallet_balance,
        'daily_transaction_volume': transaction_vol_list,
        'daily_disbursement_volume': disbursement_vol_list,
        'transfer_use_breakdown': transfer_use_breakdown,
        'last_day_volume': {
            'date': last_day.isoformat(),
            'volume': last_day_volume
        },
        'filter_active': filter_active
    }
    return data
Example #25
    """
    Just encapsulates everything needed to make an excel report
    """
    def __init__(self, title, query, headers=None, totals=None):
        #string queries
        if type(query) in (unicode, str):
            self.query = RawQuery(session, query)
        else:
            self.query = query
        self.title = title
        self.headers = headers
        self.totals = totals

sampleperiod_subq = session.query(
    SamplePeriod.anopheline2_id,
    func.count(func.distinct(SamplePeriod.site_id)).label('site_count'),
    func.count('*').label('sampleperiod_count')
    ).group_by(SamplePeriod.anopheline2_id).subquery()

point_subq = session.query(
    SamplePeriod.anopheline2_id,
    func.count('*').label('count')
    ).filter(Site.area_type=='point').filter(exists().where(SamplePeriod.site_id==Site.site_id)).filter(Anopheline.id==SamplePeriod.anopheline2_id)

q = session.query(Anopheline.name,
    func.coalesce(sampleperiod_subq.c.site_count,0),
    func.coalesce(sampleperiod_subq.c.sampleperiod_count, 0)
    ).order_by(Anopheline.name.desc())

reports.append(
    ExcelReport(
Example #26
def strava_import(current_user, activity_id):
    client = get_strava_client(current_user)
    activity = client.get_activity(activity_id=activity_id)
    stream_types = [
        'time', 'distance', 'latlng', 'temp', 'heartrate', 'velocity_smooth',
        'altitude'
    ]
    streams = client.get_activity_streams(activity_id, types=stream_types)

    device_ids = [
        device_id for device_id, in db.session.query(
            func.distinct(Move.device_id)).join(User).join(Device).filter(
                Device.name != gpx_import.GPX_DEVICE_NAME).filter(
                    Move.user == current_user).all()
    ]

    assert len(device_ids) == 1
    device_id = device_ids[0]

    device = db.session.query(Device).filter_by(id=device_id).one()

    activity_string = map_type(activity.type)

    result = db.session.query(
        Move.activity_type).filter(Move.activity == activity_string).first()
    if result:
        activity_type, = result
    else:
        activity_type = None

    move = Move()
    move.user = current_user
    move.duration = activity.elapsed_time
    move.ascent = float(activity.total_elevation_gain)
    move.speed_avg = float(activity.average_speed)
    move.hr_avg = heart_rate(activity.average_heartrate)
    move.temperature_avg = celcius_to_kelvin(activity.average_temp)
    move.device = device
    move.date_time = activity.start_date_local
    move.activity = activity_string
    move.activity_type = activity_type
    move.distance = float(activity.distance)
    move.import_date_time = datetime.now()
    move.import_module = __name__
    move.strava_activity_id = activity_id
    move.public = False
    move.source = "Strava activity id=%d; external_id='%s'" % (
        activity_id, activity.external_id)

    lengths = set([len(streams[stream].data) for stream in streams])
    assert len(lengths) == 1
    length, = lengths

    move.speed_max = move.speed_avg

    all_samples = []
    for i in range(0, length):
        time = timedelta(seconds=streams['time'].data[i])
        distance = float(streams['distance'].data[i])

        if 'heartrate' in streams:
            hr = float(streams['heartrate'].data[i])
        else:
            hr = None

        if 'latlng' in streams:
            lat, lng = streams['latlng'].data[i]
        else:
            lat = None
            lng = None

        if 'altitude' in streams:
            altitude = float(streams['altitude'].data[i])
        else:
            altitude = None

        if 'velocity_smooth' in streams:
            speed = float(streams['velocity_smooth'].data[i])
        else:
            speed = None

        if 'temp' in streams:
            temperature = celcius_to_kelvin(streams['temp'].data[i])
        else:
            temperature = None

        sample = Sample()
        sample.sample_type = SAMPLE_TYPE
        sample.move = move
        sample.time = time
        sample.utc = (activity.start_date + time).replace(tzinfo=None)
        sample.distance = distance
        sample.latitude = degree_to_radian(lat)
        sample.longitude = degree_to_radian(lng)
        sample.hr = heart_rate(hr)
        sample.temperature = temperature
        sample.speed = speed
        sample.altitude = altitude
        move.speed_max = max(move.speed_max, speed)
        all_samples.append(sample)

    derive_move_infos_from_samples(move, all_samples)

    db.session.add(move)
    db.session.commit()
    return move
Example #27
def strava_import(current_user, activity_id):
    client = get_strava_client(current_user)
    activity = client.get_activity(activity_id=activity_id)
    stream_types = ['time', 'distance', 'latlng', 'temp', 'heartrate', 'velocity_smooth', 'altitude']
    streams = client.get_activity_streams(activity_id, types=stream_types)

    device_ids = [device_id for device_id, in db.session.query(func.distinct(Move.device_id))
        .join(User)
        .join(Device)
        .filter(Device.name != gpx_import.GPX_DEVICE_NAME)
        .filter(Move.user == current_user).all()]

    assert len(device_ids) == 1
    device_id = device_ids[0]

    device = db.session.query(Device).filter_by(id=device_id).one()

    activity_string = map_type(activity.type)

    result = db.session.query(Move.activity_type).filter(Move.activity == activity_string).first()
    if result:
        activity_type, = result
    else:
        activity_type = None

    move = Move()
    move.user = current_user
    move.duration = activity.elapsed_time
    move.ascent = float(activity.total_elevation_gain)
    move.speed_avg = float(activity.average_speed)
    move.hr_avg = heart_rate(activity.average_heartrate)
    move.temperature_avg = celcius_to_kelvin(activity.average_temp)
    move.device = device
    move.date_time = activity.start_date_local
    move.activity = activity_string
    move.activity_type = activity_type
    move.distance = float(activity.distance)
    move.import_date_time = datetime.now()
    move.import_module = __name__
    move.strava_activity_id = activity_id
    move.public = False
    move.source = "Strava activity id=%d; external_id='%s'" % (activity_id, activity.external_id)

    lengths = set([len(streams[stream].data) for stream in streams])
    assert len(lengths) == 1
    length, = lengths

    move.speed_max = move.speed_avg

    all_samples = []
    for i in range(0, length):
        time = timedelta(seconds=streams['time'].data[i])
        distance = float(streams['distance'].data[i])

        if 'heartrate' in streams:
            hr = float(streams['heartrate'].data[i])
        else:
            hr = None

        if 'latlng' in streams:
            lat, lng = streams['latlng'].data[i]
        else:
            lat = None
            lng = None

        if 'altitude' in streams:
            altitude = float(streams['altitude'].data[i])
        else:
            altitude = None

        if 'velocity_smooth' in streams:
            speed = float(streams['velocity_smooth'].data[i])
        else:
            speed = None

        if 'temp' in streams:
            temperature = celcius_to_kelvin(streams['temp'].data[i])
        else:
            temperature = None

        sample = Sample()
        sample.sample_type = SAMPLE_TYPE
        sample.move = move
        sample.time = time
        sample.utc = (activity.start_date + time).replace(tzinfo=None)
        sample.distance = distance
        sample.latitude = degree_to_radian(lat)
        sample.longitude = degree_to_radian(lng)
        sample.hr = heart_rate(hr)
        sample.temperature = temperature
        sample.speed = speed
        sample.altitude = altitude
        move.speed_max = max(move.speed_max, speed)
        all_samples.append(sample)

    derive_move_infos_from_samples(move, all_samples)

    db.session.add(move)
    db.session.commit()
    return move
Example #28
    share = relation(Share,
                     innerjoin=True,
                     backref=backref('disks', cascade='all'))

    __extra_table_args__ = (Index('%s_share_idx' % _TN, share_id), )
    __mapper_args__ = {'polymorphic_identity': 'virtual_disk'}

    def __init__(self, **kw):
        if 'address' not in kw or kw['address'] is None:
            raise ValueError("address is mandatory for shared disks")
        super(VirtualDisk, self).__init__(**kw)

    def __repr__(self):
        return "<%s %s (%s) of machine %s, %d GB, provided by %s>" % \
                (self._get_class_label(), self.device_name,
                 self.controller_type, self.machine.label, self.capacity,
                 (self.share.name if self.share else "no_share"))


# The formatter code is interested in the count of disks/machines, and it is
# cheaper to query the DB than to load all entities into memory
Share.disk_count = column_property(select(
    [func.count()], VirtualDisk.share_id == Share.id).label("disk_count"),
                                   deferred=True)

Share.machine_count = column_property(
    select([func.count(func.distinct(VirtualDisk.machine_id))],
           VirtualDisk.share_id == Share.id).label("machine_count"),
    deferred=True)
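
The two column_property attributes above are deferred, so each count is loaded with a separate SELECT the first time the attribute is touched. A hypothetical usage sketch that undefers them so the counts are fetched together with the shares (an open session is assumed):

from sqlalchemy.orm import undefer

shares = session.query(Share).options(
    undefer("disk_count"),
    undefer("machine_count"),
).all()

for share in shares:
    print(share.name, share.disk_count, share.machine_count)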
Example #29
def run():
    options, args = commandline_args("Dump BIP into JSON files")
    d = DB()
    conn, meta = d.connect(options["database"])

    park_table = d.model.park
    park_dim_table = d.model.park_dimension
    bip_table = d.model.bip
    game_table = d.model.game
    player_table = d.model.mlbam_player
    ab_table = d.model.atbat

    # Argh, Postgres requires every non-aggregated column to be in the GROUP BY clause
    park_sql = select(
        [park_table.c.id, park_table.c.name, func.count(bip_table.c.id).label("num")],
        from_obj=park_table.join(bip_table),
    ).group_by(park_table.c.id, park_table.c.name)

    parks = {}
    for row in conn.execute(park_sql):
        p = {}
        for key in row.keys():
            if key in set(["id", "num"]):
                p[key] = int(row[key])
            else:
                p[key] = str(row[key])
        p["bip"] = p["num"]
        p["years"] = {}
        del p["num"]
        parks[p["id"]] = p

    # Skipping the closing year for simplicity. If a team switches parks in the
    # middle of the year, it will be a park with a new id. I believe the only
    # issue will be if MLBAM updates the image in the middle of the season.
    # This might happen for Citi Field (no bases or foul lines!).
    dimension_sql = select(
        [
            park_dim_table.c.park_id,
            park_dim_table.c.image_file,
            park_dim_table.c.opening,
            park_dim_table.c.hp_x,
            park_dim_table.c.hp_y,
            park_dim_table.c.image_hp_x,
            park_dim_table.c.image_hp_y,
            park_dim_table.c.scale,
        ],
        from_obj=park_dim_table,
    )

    for row in conn.execute(dimension_sql):
        id = row["park_id"]
        if id not in parks:
            continue
        opening = row["opening"]
        images = {
            "file": row["image_file"],
            "scale": float(row["scale"]),
            "hp_x": float(row["hp_x"]),
            "hp_y": float(row["hp_y"]),
            "image_hp_x": float(row["image_hp_x"]),
            "image_hp_y": float(row["image_hp_y"]),
            "opening": opening.year,
        }
        if "images" not in parks[id]:
            parks[id]["images"] = {}
        parks[id]["images"][opening.year] = images

    years_sql = select([func.distinct(text(get_year(conn, "day")))], from_obj=game_table)
    years = [int(row[0]) for row in conn.execute(years_sql)]

    p = player_table.alias()
    b = player_table.alias()
    bip_sql = select(
        [
            game_table.c.day.label("day"),
            bip_table.c.type.label("type"),
            bip_table.c.x.label("x"),
            bip_table.c.y.label("y"),
            ab_table.c.event.label("event"),
            (b.c.namelast + ", " + b.c.namefirst).label("batter"),
            ab_table.c.batter_stand.label("stand"),
            (p.c.namelast + ", " + p.c.namefirst).label("pitcher"),
            ab_table.c.pitcher_throw.label("throw"),
        ],
        and_(park_table.c.id == bindparam("park"), text(get_year(conn, "day")) == bindparam("year")),
        from_obj=bip_table.join(park_table)
        .join(ab_table)
        .join(game_table)
        .outerjoin(p, onclause=p.c.mlbamid == ab_table.c.pitcher)
        .outerjoin(b, onclause=b.c.mlbamid == ab_table.c.batter),
    )

    for park_id in parks.keys():
        for year in years:
            bip_list = []
            str_y = str(year)
            for bip in conn.execute(bip_sql, {"park": park_id, "year": str_y}):
                bip_list.append(
                    {
                        "x": bip["x"],
                        "y": bip["y"],
                        "event": bip["event"],
                        "type": bip["type"],
                        "pitcher": bip["pitcher"],
                        "throw": bip["throw"],
                        "batter": bip["batter"],
                        "stand": bip["stand"],
                    }
                )
            # No need to write empty files!
            if len(bip_list) > 0:
                parks[park_id]["years"][year] = True
                park_file = os.path.join(options["output_dir"], "park-" + str(park_id) + "-" + str(year) + ".json")
                dump_json(park_file, bip_list)

    parks_file = os.path.join(options["output_dir"], "parks.json")
    dump_json(parks_file, parks)
Example #30
    def __init__(self, group_strategy, timeseries_unit = 'day', token=None, date_filter_attributes=None):
        self.filterable_attributes = [DATE, CUSTOM_ATTRIBUTE, TRANSFER_ACCOUNT, CREDIT_TRANSFER, USER]
        self.timeseries_unit = timeseries_unit
        self.date_filter_attributes = date_filter_attributes
        self.metrics = []

        total_amount_query = db.session.query(func.sum(CreditTransfer.transfer_amount).label('total'))
        self.metrics.append(metric.Metric(
            metric_name='total_distributed',
            query=total_amount_query,
            object_model=CreditTransfer,
            stock_filters=[filters.disbursement_filters],
            caching_combinatory_strategy=metrics_cache.SUM,
            filterable_by=self.filterable_attributes,
            bypass_user_filters=True,
        ))

        self.metrics.append(metric.Metric(
            metric_name='total_reclaimed',
            query=total_amount_query,
            object_model=CreditTransfer,
            stock_filters=[filters.reclamation_filters],
            caching_combinatory_strategy=metrics_cache.SUM,
            filterable_by=self.filterable_attributes,
            bypass_user_filters=True,
        ))

        self.metrics.append(metric.Metric(
            metric_name='total_withdrawn',
            query=total_amount_query,
            object_model=CreditTransfer,
            stock_filters=[filters.withdrawal_filters],
            caching_combinatory_strategy=metrics_cache.SUM,
            filterable_by=self.filterable_attributes,
            bypass_user_filters=True,
        ))

        # Timeseries Metrics
        if group_strategy:
            transaction_volume_timeseries_query = group_strategy.build_query_group_by_with_join(db.session.query(func.sum(CreditTransfer.transfer_amount).label('volume'),
                    func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer]).label('date'), group_strategy.group_by_column).group_by(func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer])), CreditTransfer)
            aggregated_transaction_volume_query = group_strategy.build_query_group_by_with_join(db.session.query(func.sum(CreditTransfer.transfer_amount).label('volume'), group_strategy.group_by_column), CreditTransfer)
        else:
            transaction_volume_timeseries_query = db.session.query(func.sum(CreditTransfer.transfer_amount).label('volume'),
                    func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer]).label('date')).group_by(func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer]))
            aggregated_transaction_volume_query = None
        total_transaction_volume_query = db.session.query(func.sum(CreditTransfer.transfer_amount).label('volume'))

        self.metrics.append(metric.Metric(
            metric_name='all_payments_volume',
            is_timeseries=True,
            query=transaction_volume_timeseries_query,
            aggregated_query=aggregated_transaction_volume_query,
            total_query=total_transaction_volume_query,
            object_model=CreditTransfer,
            timeseries_caching_combinatory_strategy=metrics_cache.SUM_OBJECTS,
            caching_combinatory_strategy=metrics_cache.QUERY_ALL,
            filterable_by=self.filterable_attributes,
            query_actions=[FORMAT_TIMESERIES],
            aggregated_query_actions=[FORMAT_AGGREGATE_METRICS],
            total_query_actions=[GET_FIRST],
            value_type=CURRENCY,
            token=token
        ))

        self.metrics.append(metric.Metric(
            metric_name='transfer_amount_per_user',
            is_timeseries=True,
            query=transaction_volume_timeseries_query,
            aggregated_query=aggregated_transaction_volume_query,
            total_query=total_transaction_volume_query,
            object_model=CreditTransfer,
            stock_filters=[filters.standard_payment_filters],
            timeseries_caching_combinatory_strategy=metrics_cache.SUM_OBJECTS,
            caching_combinatory_strategy=metrics_cache.QUERY_ALL,
            filterable_by=self.filterable_attributes,
            query_actions=[CALCULATE_TIMESERIES_PER_USER, FORMAT_TIMESERIES], # Add per user
            aggregated_query_actions=[CALCULATE_AGGREGATE_PER_USER, FORMAT_AGGREGATE_METRICS],
            total_query_actions=[GET_FIRST, CALCULATE_TOTAL_PER_USER],
            value_type=CURRENCY,
            token=token
        ))

        if group_strategy:
            transaction_count_timeseries_query = group_strategy.build_query_group_by_with_join(db.session.query(func.count(CreditTransfer.id).label('volume'),
                    func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer]).label('date'), group_strategy.group_by_column).group_by(func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer])), CreditTransfer)
            aggregated_transaction_count_query = group_strategy.build_query_group_by_with_join(db.session.query(func.count(CreditTransfer.id).label('volume'), group_strategy.group_by_column), CreditTransfer)
        else:
            transaction_count_timeseries_query = db.session.query(func.count(CreditTransfer.id).label('volume'),
                    func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer]).label('date')).group_by(func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer]))
            aggregated_transaction_count_query = None
        total_transaction_count_query = db.session.query(func.count(CreditTransfer.id).label('volume'))
        self.metrics.append(metric.Metric(
            metric_name='daily_transaction_count',
            is_timeseries=True,
            query=transaction_count_timeseries_query,
            aggregated_query=aggregated_transaction_count_query,
            total_query=total_transaction_count_query,
            object_model=CreditTransfer,
            stock_filters=[filters.standard_payment_filters],
            timeseries_caching_combinatory_strategy=metrics_cache.SUM_OBJECTS,
            caching_combinatory_strategy=metrics_cache.QUERY_ALL,
            filterable_by=self.filterable_attributes,
            query_actions=[FORMAT_TIMESERIES],
            aggregated_query_actions=[FORMAT_AGGREGATE_METRICS],
            total_query_actions=[GET_FIRST],
            value_type=COUNT
        ))
        
        self.metrics.append(metric.Metric(
            metric_name='trades_per_user',
            is_timeseries=True,
            query=transaction_count_timeseries_query,
            aggregated_query=aggregated_transaction_count_query,
            total_query=total_transaction_count_query,
            object_model=CreditTransfer,
            stock_filters=[filters.standard_payment_filters],
            timeseries_caching_combinatory_strategy=metrics_cache.SUM_OBJECTS,
            caching_combinatory_strategy=metrics_cache.QUERY_ALL,
            filterable_by=self.filterable_attributes,
            query_actions=[CALCULATE_TIMESERIES_PER_USER, FORMAT_TIMESERIES], # Add per user
            aggregated_query_actions=[CALCULATE_AGGREGATE_PER_USER, FORMAT_AGGREGATE_METRICS],
            total_query_actions=[GET_FIRST, CALCULATE_TOTAL_PER_USER],
            value_type=COUNT_AVERAGE,
        ))

        if group_strategy:
            active_users_timeseries_query = group_strategy.build_query_group_by_with_join(db.session.query(func.count(func.distinct(CreditTransfer.sender_user_id)).label('volume'),
                    func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer]).label('date'), group_strategy.group_by_column).group_by(func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer])), CreditTransfer)
            aggregated_active_users_query = group_strategy.build_query_group_by_with_join(db.session.query(func.count(func.distinct(CreditTransfer.sender_user_id)).label('volume'), group_strategy.group_by_column), CreditTransfer)
        else:
            active_users_timeseries_query = db.session.query(func.count(func.distinct(CreditTransfer.sender_user_id)).label('volume'),
                func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer]).label('date')).group_by(func.date_trunc(self.timeseries_unit, self.date_filter_attributes[CreditTransfer]))
            aggregated_active_users_query = None
        total_transaction_volume_query = db.session.query(func.count(func.distinct(CreditTransfer.sender_user_id)).label('volume'))
        self.metrics.append(metric.Metric(
            metric_name='users_who_made_purchase',
            is_timeseries=True,
            query=active_users_timeseries_query,
            aggregated_query=aggregated_active_users_query,
            total_query=total_transaction_volume_query,
            object_model=CreditTransfer,
            #stock_filters=[filters.beneficiary_filters], # NOTE: Do we want this filter?
            stock_filters=[],
            timeseries_caching_combinatory_strategy=metrics_cache.SUM_OBJECTS,
            caching_combinatory_strategy=metrics_cache.QUERY_ALL,
            filterable_by=self.filterable_attributes,
            query_actions=[FORMAT_TIMESERIES],
            aggregated_query_actions=[FORMAT_AGGREGATE_METRICS],
            total_query_actions=[GET_FIRST],
            value_type=COUNT,
        ))
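
A common thread in the metrics above is that func.count(func.distinct(col)) is what produces the COUNT(DISTINCT ...) aggregate in the generated SQL. Below is a minimal, standalone sketch (SQLAlchemy 1.x style, as in these examples, with made-up table and column names) of what that expression compiles to; an equivalent spelling is func.count(col.distinct()).

from sqlalchemy import Column, Integer, MetaData, Table, func, select

metadata = MetaData()
credit_transfer = Table("credit_transfer", metadata, Column("sender_user_id", Integer))

stmt = select([func.count(func.distinct(credit_transfer.c.sender_user_id)).label("active_users")])
print(stmt)
# Roughly: SELECT count(distinct(credit_transfer.sender_user_id)) AS active_users
#          FROM credit_transfer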
Beispiel #31
0
def topsku_week_table_excel():
    date = datetime.strptime(request.form["sku_date"], "%Y-%m-%d").date()
    hour = int(request.form["hour"])

    weekday = date.weekday()
    start_date = date - relativedelta(weeks=4, weekday=weekday)
    end_date = date

    brands = db.session.query(func.distinct(top_sku_talendfc.brand))\
                .filter(and_(top_sku_talendfc.txn_date >= start_date,
                                top_sku_talendfc.txn_date <= end_date,
                                func.weekday(top_sku_talendfc.txn_date) == weekday,
                                top_sku_talendfc.processing_hr == hour))
    lookup = db.session.query(top_sku_talendfc.txn_date,top_sku_talendfc.brand,func.sum(top_sku_talendfc.txn_amount))\
                .filter(and_(top_sku_talendfc.txn_date >= start_date,
                                top_sku_talendfc.txn_date <= end_date,
                                func.weekday(top_sku_talendfc.txn_date) == weekday,
                                top_sku_talendfc.processing_hr == hour))\
                .group_by(top_sku_talendfc.txn_date,top_sku_talendfc.brand)

    brands = [b[0] for b in brands] + ["TOTAL"]
    sku_dict = {}

    for l in lookup.all():
        key = l[0].strftime("%Y-%m-%d")
        if key not in sku_dict:
            sku_dict[key] = {
                "brands": dict.fromkeys(brands, None),
            }
        sku_dict[key]["brands"][l[1]] = str(l[2])
        if sku_dict[key]["brands"]["TOTAL"] is None:
            sku_dict[key]["brands"]["TOTAL"] = 0
        sku_dict[key]["brands"]["TOTAL"] += l[2]

    formatted_data = {"columns": ["Dates"] + brands, "data": []}
    for k in sku_dict.keys():
        sku = dict.fromkeys(formatted_data["columns"])
        sku["Dates"] = k
        for b in sku_dict[k]["brands"].keys():
            sku[b] = str(sku_dict[k]["brands"][b])
        formatted_data["data"].append(sku)

    #output in bytes
    output = io.BytesIO()
    #create WorkBook object
    workbook = Workbook()
    workbook_name = "Manifest Hive Bashing {}".format(
        datetime.now().strftime("%Y-%m-%d %H-%M-%S"))
    #add a sheet
    ws = workbook.create_sheet('Week on Week Amounts per Brand', 0)

    greenFill = PatternFill(start_color='AEEA00',
                            end_color='AEEA00',
                            fill_type='solid')

    # Write the header row (highlighted in green), then one row per date
    for col_idx, col_name in enumerate(formatted_data["columns"], start=1):
        header_cell = ws.cell(row=1, column=col_idx, value=col_name)
        header_cell.fill = greenFill

    for row_idx, row_data in enumerate(formatted_data["data"], start=2):
        for col_idx, col_name in enumerate(formatted_data["columns"], start=1):
            ws.cell(row=row_idx, column=col_idx, value=row_data[col_name])

    workbook.save(output)
    output.seek(0)
    filename = workbook_name

    return Response(
        output,
        mimetype=
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        headers={
            "Content-Disposition":
            "attachment;filename={}.xlsx".format(filename)
        })
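
An aside on the func calls above, offered as a hedged sketch with invented table names: sqlalchemy.func builds a SQL function from any attribute name, so func.weekday(...) simply renders WEEKDAY(...) and only works on backends that define such a function (MySQL does).

from sqlalchemy import Column, Date, MetaData, String, Table, func, select

metadata = MetaData()
top_sku = Table("top_sku", metadata, Column("txn_date", Date), Column("brand", String))

stmt = select([func.distinct(top_sku.c.brand)]).where(func.weekday(top_sku.c.txn_date) == 2)
print(stmt)
# Roughly: SELECT distinct(top_sku.brand) FROM top_sku WHERE weekday(top_sku.txn_date) = :weekday_1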
Beispiel #32
0
    def child_source_scores(self, row):
        """ Counts of children sources, how many speak, etc. """
        self.scores_ws.write(row, 0, 'Child Sources')

        # all child sources
        rows = self.filter(
            db.session.query(
                Medium.name,
                func.count(1).label('freq'))
            .join(Document)
            .join(DocumentSource)
            .filter(DocumentSource.source_type == 'child')
            .group_by(Medium.name)
        ).all()
        self.write_simple_score_row('Total child sources', rows, row)
        row += 1
        self.write_percent_row('Child sources', self.score_row['Total sources'], row - 1, row)
        row += 1

        # quoted child sources
        rows = self.filter(
            db.session.query(
                Medium.name,
                func.count(1).label('freq'))
            .join(Document)
            .join(DocumentSource)
            .filter(DocumentSource.source_type == 'child')
            .filter(DocumentSource.quoted == True)
            .group_by(Medium.name)
        ).all()  # noqa
        self.write_simple_score_row('Quoted child sources', rows, row)
        row += 1

        # percent of all sources
        self.write_percent_row('Quoted child sources', self.score_row['Total sources'], row - 1, row)
        row += 1

        # source counts per document
        rows = self.source_counts(children=True, limit=4)
        rows = [[m, c + ' Child Sources', v] for m, c, v in rows]
        buckets = ['1 Child Sources', '2 Child Sources', '3 Child Sources', '4 Child Sources', '>4 Child Sources']

        starting_row = row
        row = self.write_score_table(buckets, rows, row) + 1
        row = self.write_percent_table(buckets, self.score_row['Total articles'], starting_row, row) + 1

        # origin of documents with quoted children
        self.scores_ws.write(row, 0, 'Origins of Quoted Children')
        rows = self.filter(
            db.session.query(
                Medium.name,
                Location.name,
                func.count(func.distinct(Document.id)).label('freq'))
            .join(Document)
            .join(DocumentSource)
            .join(Location)
            .filter(DocumentSource.source_type == 'child')
            .filter(DocumentSource.quoted == True)
            .group_by(Medium.name, Location.name)  # noqa
        ).all()
        origins = list(set(r[1] for r in rows))
        row = self.write_score_table(origins, rows, row)
        # entropy
        self.write_simple_score_row('Diversity of Quoted Origins', self.entropy(rows), row)
        row += 1

        return row
Beispiel #33
0
    def __repr__(self):
        return "<%s %s (%s) of machine %s, %d GB, provided by %s>" % \
            (self._get_class_label(), self.device_name,
             self.controller_type, self.machine.label, self.capacity,
             (self.share.name if self.share else "no_share"))

# The formatter code is interested in the count of disks/machines, and it is
# cheaper to query the DB than to load all entities into memory
Share.disk_count = column_property(
    select([func.count()],
           VirtualNasDisk.share_id == Share.id)
    .label("disk_count"), deferred=True)

Share.machine_count = column_property(
    select([func.count(func.distinct(VirtualNasDisk.machine_id))],
           VirtualNasDisk.share_id == Share.id)
    .label("machine_count"), deferred=True)


class VirtualLocalDisk(VirtualDisk):
    filesystem_id = Column(Integer, ForeignKey('filesystem.id',
                                               name='%s_filesystem_fk' % _TN,
                                               ondelete='CASCADE'),
                           nullable=True)

    filesystem = relation(Filesystem, innerjoin=True,
                          backref=backref('disks', cascade='all'))

    __extra_table_args__ = (Index('%s_filesystem_idx' % _TN, filesystem_id),)
    __mapper_args__ = {'polymorphic_identity': 'virtual_localdisk'}
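
A hedged usage sketch for the deferred column_property counts above (it assumes a configured Session and a share named "nas_share_1", both illustrative): each deferred count is evaluated by the database as a correlated subquery on first attribute access, so no VirtualNasDisk rows need to be loaded.

share = session.query(Share).filter_by(name="nas_share_1").one()  # 'session' assumed to exist
print(share.disk_count)     # first access emits the COUNT() subquery
print(share.machine_count)  # first access emits the COUNT(DISTINCT machine_id) subquery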
Beispiel #34
0
def calculate_transfer_stats(total_time_series=False):

    total_distributed = db.session.query(func.sum(models.CreditTransfer.transfer_amount).label('total'))\
        .filter(models.CreditTransfer.transfer_type == models.TransferTypeEnum.DISBURSEMENT).first().total

    total_spent = db.session.query(func.sum(models.CreditTransfer.transfer_amount).label('total'))\
        .filter(models.CreditTransfer.transfer_type == models.TransferTypeEnum.PAYMENT).first().total

    total_beneficiaries = db.session.query(
        models.User).filter(models.User.is_beneficiary == True).count()

    total_vendors = db.session.query(models.User)\
        .filter(models.User.is_vendor == True).count()

    total_users = total_beneficiaries + total_vendors

    has_transferred_count = db.session.query(func.count(func.distinct(models.CreditTransfer.sender_user_id))
        .label('transfer_count'))\
        .filter(models.CreditTransfer.transfer_type == models.TransferTypeEnum.PAYMENT).first().transfer_count

    # zero_balance_count = db.session.query(func.count(models.TransferAccount.id).label('zero_balance_count'))\
    #     .filter(models.TransferAccount.balance == 0).first().zero_balance_count

    exhausted_balance_count = db.session.query(func.count(func.distinct(models.CreditTransfer.sender_transfer_account_id))
        .label('transfer_count')) \
        .join(models.CreditTransfer.sender_transfer_account)\
        .filter(models.CreditTransfer.transfer_type == models.TransferTypeEnum.PAYMENT) \
        .filter(models.TransferAccount.balance == 0).first().transfer_count

    daily_transaction_volume = db.session.query(func.sum(models.CreditTransfer.transfer_amount).label('volume'),
                 func.date_trunc('day', models.CreditTransfer.created).label('date'))\
        .group_by(func.date_trunc('day', models.CreditTransfer.created))\
        .filter(models.CreditTransfer.transfer_type == models.TransferTypeEnum.PAYMENT).all()

    daily_disbursement_volume = db.session.query(func.sum(models.CreditTransfer.transfer_amount).label('volume'),
                                                func.date_trunc('day', models.CreditTransfer.created).label('date')) \
        .group_by(func.date_trunc('day', models.CreditTransfer.created)) \
        .filter(models.CreditTransfer.transfer_type == models.TransferTypeEnum.DISBURSEMENT).all()

    try:
        master_wallet_balance = master_wallet_funds_available()
    except BlockchainError:
        master_wallet_balance = 0

    try:
        last_day = daily_transaction_volume[0].date
        last_day_volume = daily_transaction_volume[0].volume
        transaction_vol_list = [{
            'date': item.date.isoformat(),
            'volume': item.volume
        } for item in daily_transaction_volume]
    except IndexError:  # No transactions
        last_day = datetime.datetime.utcnow()
        last_day_volume = 0
        has_transferred_count = 0
        transaction_vol_list = [{
            'date': datetime.datetime.utcnow().isoformat(),
            'volume': 0
        }]

    try:
        last_day_disbursement_volume = daily_disbursement_volume[0].volume
        disbursement_vol_list = [{
            'date': item.date.isoformat(),
            'volume': item.volume
        } for item in daily_disbursement_volume]
    except IndexError:
        last_day_disbursement_volume = 0
        disbursement_vol_list = [{
            'date': datetime.datetime.utcnow().isoformat(),
            'volume': 0
        }]

    data = {
        'total_distributed': total_distributed,
        'total_spent': total_spent,
        'has_transferred_count': has_transferred_count,
        'zero_balance_count': exhausted_balance_count,
        'total_beneficiaries': total_beneficiaries,
        'total_users': total_users,
        'master_wallet_balance': master_wallet_balance,
        'daily_transaction_volume': transaction_vol_list,
        'daily_disbursement_volume': disbursement_vol_list,
        'last_day_volume': {
            'date': last_day.isoformat(),
            'volume': last_day_volume
        }
    }

    return data
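
One general SQLAlchemy caveat worth noting for the totals above (standard SQL behaviour, not specific to this project): SUM over zero matching rows is NULL, so values such as total_distributed and total_spent can come back as None. A hypothetical guard:

total_distributed = db.session.query(func.sum(models.CreditTransfer.transfer_amount))\
    .filter(models.CreditTransfer.transfer_type == models.TransferTypeEnum.DISBURSEMENT)\
    .scalar() or 0
# or keep the default in SQL: func.coalesce(func.sum(models.CreditTransfer.transfer_amount), 0)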
Beispiel #35
0
def create_receiver_stats(session, date, logger=None):
    """Add/update receiver stats."""

    if logger is None:
        logger = app.logger

    (start, end) = date_to_timestamps(date)

    # First kill the stats for the selected date
    deleted_counter = session.query(ReceiverStats).filter(
        ReceiverStats.date == date).delete()

    # Select one day
    sq = session.query(ReceiverBeacon).filter(
        between(ReceiverBeacon.timestamp, start, end)).subquery()

    # Calculate stats, firstseen, lastseen and last values != NULL
    receiver_stats = session.query(
        distinct(sq.c.receiver_id).label("receiver_id"),
        literal(date).label("date"),
        func.first_value(sq.c.timestamp).over(
            partition_by=sq.c.receiver_id,
            order_by=case(
                [(sq.c.timestamp == null(), None)],
                else_=sq.c.timestamp).asc().nullslast()).label("firstseen"),
        func.first_value(sq.c.timestamp).over(
            partition_by=sq.c.receiver_id,
            order_by=case(
                [(sq.c.timestamp == null(), None)],
                else_=sq.c.timestamp).desc().nullslast()).label("lastseen"),
        func.first_value(sq.c.location).over(
            partition_by=sq.c.receiver_id,
            order_by=case([(sq.c.location == null(), None)],
                          else_=sq.c.timestamp).desc().nullslast()).label(
                              "location_wkt"),
        func.first_value(sq.c.altitude).over(
            partition_by=sq.c.receiver_id,
            order_by=case(
                [(sq.c.altitude == null(), None)],
                else_=sq.c.timestamp).desc().nullslast()).label("altitude"),
        func.first_value(sq.c.version).over(
            partition_by=sq.c.receiver_id,
            order_by=case(
                [(sq.c.version == null(), None)],
                else_=sq.c.timestamp).desc().nullslast()).label("version"),
        func.first_value(sq.c.platform).over(
            partition_by=sq.c.receiver_id,
            order_by=case(
                [(sq.c.platform == null(), None)],
                else_=sq.c.timestamp).desc().nullslast()).label("platform"),
    ).subquery()

    # And insert them
    ins = insert(ReceiverStats).from_select(
        [
            ReceiverStats.receiver_id,
            ReceiverStats.date,
            ReceiverStats.firstseen,
            ReceiverStats.lastseen,
            ReceiverStats.location_wkt,
            ReceiverStats.altitude,
            ReceiverStats.version,
            ReceiverStats.platform,
        ],
        receiver_stats,
    )
    res = session.execute(ins)
    insert_counter = res.rowcount
    session.commit()
    logger.warn("ReceiverStats for {}: {} deleted, {} inserted".format(
        date, deleted_counter, insert_counter))

    # Update aircraft_beacon_count, aircraft_count and max_distance
    aircraft_beacon_stats = (session.query(
        AircraftBeacon.receiver_id,
        func.count(AircraftBeacon.timestamp).label("aircraft_beacon_count"),
        func.count(func.distinct(
            AircraftBeacon.device_id)).label("aircraft_count"),
        func.max(AircraftBeacon.distance).label("max_distance"),
    ).filter(
        and_(between(AircraftBeacon.timestamp, start,
                     end), AircraftBeacon.error_count == 0,
             AircraftBeacon.quality <= MAX_PLAUSIBLE_QUALITY,
             AircraftBeacon.relay == null())).group_by(
                 AircraftBeacon.receiver_id).subquery())

    upd = (update(ReceiverStats).where(
        and_(ReceiverStats.date == date, ReceiverStats.receiver_id ==
             aircraft_beacon_stats.c.receiver_id)).values({
                 "aircraft_beacon_count":
                 aircraft_beacon_stats.c.aircraft_beacon_count,
                 "aircraft_count":
                 aircraft_beacon_stats.c.aircraft_count,
                 "max_distance":
                 aircraft_beacon_stats.c.max_distance
             }))

    result = session.execute(upd)
    update_counter = result.rowcount
    session.commit()
    logger.warn("Updated {} ReceiverStats".format(update_counter))

    return "ReceiverStats for {}: {} deleted, {} inserted, {} updated".format(
        date, deleted_counter, insert_counter, update_counter)
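
The firstseen/lastseen/last-value logic above leans on a window-function idiom: first_value(...) OVER (PARTITION BY receiver ORDER BY CASE ... DESC NULLS LAST) picks, per receiver, the value from the newest row where the column is not NULL. A minimal standalone sketch of that idiom, with invented table and column names:

from sqlalchemy import (Column, DateTime, Integer, MetaData, String, Table,
                        case, distinct, func, null, select)

metadata = MetaData()
beacons = Table(
    "receiver_beacon", metadata,
    Column("receiver_id", Integer),
    Column("timestamp", DateTime),
    Column("version", String),
)

# Per receiver: the 'version' from the newest row whose version is not NULL
latest_version = func.first_value(beacons.c.version).over(
    partition_by=beacons.c.receiver_id,
    order_by=case([(beacons.c.version == null(), None)],
                  else_=beacons.c.timestamp).desc().nullslast(),
).label("version")

stmt = select([distinct(beacons.c.receiver_id).label("receiver_id"), latest_version])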
Beispiel #36
0
    def __init__(self, group_strategy, timeseries_unit = 'day'):
        self.filterable_attributes = [DATE, CUSTOM_ATTRIBUTE, TRANSFER_ACCOUNT, USER]
        self.timeseries_unit = timeseries_unit
        self.metrics = []

        total_beneficiaries_query = db.session.query(User)
        self.metrics.append(metric.Metric(
            metric_name='total_beneficiaries',
            query=total_beneficiaries_query,
            object_model=User,
            stock_filters=[filters.beneficiary_filters],
            caching_combinatory_strategy=metrics_cache.COUNT,
            filterable_by=self.filterable_attributes))

        total_vendors_query = db.session.query(User)
        self.metrics.append(metric.Metric(
            metric_name='total_vendors',
            query=total_vendors_query,
            object_model=User,
            stock_filters=[filters.vendor_filters],
            caching_combinatory_strategy=metrics_cache.COUNT,
            filterable_by=self.filterable_attributes))

        # Timeseries Metrics
        if group_strategy:
            users_created_timeseries_query = group_strategy.build_query_group_by_with_join(db.session.query(func.count(User.id).label('volume'),
                    func.date_trunc(self.timeseries_unit, User.created).label('date'), group_strategy.group_by_column).group_by(func.date_trunc(self.timeseries_unit, User.created)), User)
            aggregated_users_created_query = group_strategy.build_query_group_by_with_join(db.session.query(func.count(User.id).label('volume'), group_strategy.group_by_column), User)
        else:
            users_created_timeseries_query = db.session.query(func.count(User.id).label('volume'),
                    func.date_trunc(self.timeseries_unit, User.created).label('date')).group_by(func.date_trunc(self.timeseries_unit, User.created))
            aggregated_users_created_query = None
        total_users_created_query = db.session.query(func.count(User.id).label('volume'))
        self.metrics.append(metric.Metric(
            metric_name='users_created',
            is_timeseries=True,
            query=users_created_timeseries_query,
            aggregated_query=aggregated_users_created_query,
            total_query=total_users_created_query,
            object_model=User,
            #stock_filters=[filters.beneficiary_filters], # NOTE: Do we still want this filter?
            stock_filters=[],
            caching_combinatory_strategy=metrics_cache.QUERY_ALL,
            filterable_by=self.filterable_attributes,
            query_actions=[FORMAT_TIMESERIES],
            aggregated_query_actions=[FORMAT_AGGREGATE_METRICS],
            total_query_actions=[GET_FIRST],
        ))

        if group_strategy:
            active_users_timeseries_query = group_strategy.build_query_group_by_with_join(db.session.query(func.count(func.distinct(CreditTransfer.sender_user_id)).label('volume'),
                    func.date_trunc(self.timeseries_unit, CreditTransfer.created).label('date'), group_strategy.group_by_column).group_by(func.date_trunc(self.timeseries_unit, CreditTransfer.created)), CreditTransfer)
            aggregated_active_users_query = group_strategy.build_query_group_by_with_join(db.session.query(func.count(func.distinct(CreditTransfer.sender_user_id)).label('volume'), group_strategy.group_by_column), CreditTransfer)
        else:
            active_users_timeseries_query = db.session.query(func.count(func.distinct(CreditTransfer.sender_user_id)).label('volume'),
                    func.date_trunc(self.timeseries_unit, CreditTransfer.created).label('date')).group_by(func.date_trunc(self.timeseries_unit, CreditTransfer.created))
            aggregated_active_users_query = None
        total_active_users_query = db.session.query(func.count(func.distinct(CreditTransfer.sender_user_id)).label('volume'))
        self.metrics.append(metric.Metric(
            metric_name='active_users',
            is_timeseries=True,
            query=active_users_timeseries_query,
            aggregated_query=aggregated_active_users_query,
            total_query=total_active_users_query,
            object_model=CreditTransfer,
            #stock_filters=[filters.beneficiary_filters], # NOTE: Do we still want this filter?
            stock_filters=[],
            caching_combinatory_strategy=metrics_cache.QUERY_ALL,
            filterable_by=self.filterable_attributes,
            query_actions=[FORMAT_TIMESERIES],
            aggregated_query_actions=[FORMAT_AGGREGATE_METRICS],
            total_query_actions=[GET_FIRST],
        ))
Beispiel #37
0
    def _process_events(klass,
                        events,
                        page,
                        query=None,
                        cities=None,
                        user=None,
                        flags=None,
                        selected_tags=None,
                        selected_categories=None,
                        future_only=None):
        if future_only:
            events = events.filter(
                or_(Event.start_time >= datetime.datetime.now(),
                    Event.end_time >= datetime.datetime.now()))

        events = events.filter(Event.status != Event.STATUS_CLOSED_PERM)

        events, selected_tags = klass._filter_events(
            events,
            query=query,
            categories=selected_categories,
            tags=selected_tags,
            flags=flags)

        event_cities = klass._cities_for_events(events)
        if cities:
            events = events.filter(Event.city.in_(cities))
            for city in event_cities:
                city['selected'] = city['chip_name'] in cities

        tags, categories = klass._tags_for_events(
            events=events,
            selected_categories=selected_categories,
            selected_tags=selected_tags)

        event_user_ids = None
        if user:
            event_ids = {e[0].event_id for e in events if e[1]}

            following_user_ids = alias(
                db_session.query(func.distinct(Follow.follow_id)).filter(
                    and_(UserEvent.event_id.in_(event_ids),
                         UserEvent.user_id == Follow.follow_id,
                         Follow.user_id == user.user_id,
                         Follow.follow_id != user.user_id,
                         Follow.active == True)), "following_user_ids")

            event_users = {
                str(u.user_id): {
                    'user_id': u.user_id,
                    'username': u.username,
                    'image_url': u.image_url
                }
                for u in User.query.filter(User.user_id.in_(
                    following_user_ids))
            }

            events_with_following_counts = db_session.query(
                UserEvent.event_id,
                func.array_agg(func.distinct(
                    User.user_id)).label('user_ids')).filter(
                        and_(Follow.user_id == user.user_id,
                             UserEvent.user_id == Follow.follow_id,
                             UserEvent.event_id.in_(event_ids),
                             Follow.follow_id != user.user_id)).group_by(
                                 UserEvent.event_id)
            event_user_ids = {
                row[0]: set(str(follower_id) for follower_id in row[1])
                for row in events_with_following_counts
            }

        events = klass._order_events(events)
        events = events.limit(klass.PAGE_SIZE).offset(
            (page - 1) * klass.PAGE_SIZE)

        results = []
        for event, user_count in events:
            event.card_user_count = user_count
            if event_user_ids and event.event_id in event_user_ids:
                event.card_event_users = [
                    event_users[x] for x in event_user_ids[event.event_id]
                    if x in event_users
                ]
            results.append(event)

        return results, categories, tags, event_cities, events
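
A hedged aside on the ARRAY_AGG(DISTINCT ...) aggregation used above: on PostgreSQL the aggregated array comes back to Python as a list per row, which is why row[1] can be iterated directly. A minimal sketch with made-up table names:

from sqlalchemy import Column, Integer, MetaData, Table, func, select
from sqlalchemy.dialects import postgresql

metadata = MetaData()
user_event = Table("user_event", metadata,
                   Column("event_id", Integer), Column("user_id", Integer))

stmt = select([user_event.c.event_id,
               func.array_agg(func.distinct(user_event.c.user_id)).label("user_ids")]
              ).group_by(user_event.c.event_id)
print(stmt.compile(dialect=postgresql.dialect()))
# Roughly: SELECT user_event.event_id, array_agg(distinct(user_event.user_id)) AS user_ids
#          FROM user_event GROUP BY user_event.event_id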
Beispiel #38
0
    __mapper_args__ = {"polymorphic_identity": "virtual_disk"}

    def __init__(self, **kw):
        if "address" not in kw or kw["address"] is None:
            raise ValueError("address is mandatory for shared disks")
        super(VirtualDisk, self).__init__(**kw)

    def __repr__(self):
        return "<%s %s (%s) of machine %s, %d GB, provided by %s>" % (
            self._get_class_label(),
            self.device_name,
            self.controller_type,
            self.machine.label,
            self.capacity,
            (self.share.name if self.share else "no_share"),
        )


# The formatter code is interested in the count of disks/machines, and it is
# cheaper to query the DB than to load all entities into memory
Share.disk_count = column_property(
    select([func.count()], VirtualDisk.share_id == Share.id).label("disk_count"), deferred=True
)

Share.machine_count = column_property(
    select([func.count(func.distinct(VirtualDisk.machine_id))], VirtualDisk.share_id == Share.id).label(
        "machine_count"
    ),
    deferred=True,
)