示例#1
0
def get_weekly_comments_count(since=None):
    """Return approved/rejected comment counts bucketed by week.

    Args:
        since: optional lower bound on the comment creation timestamp.
    """
    approved_week = fn.date_trunc('week', Comment.created)
    approved = (Comment
                .select(approved_week, fn.count(Comment.id))
                .group_by(approved_week)
                .order_by(SQL('date_trunc').asc()))

    rejected_week = fn.date_trunc('week', RejectedComment.created)
    rejected = (RejectedComment
                .select(rejected_week, fn.count(RejectedComment.id))
                .group_by(rejected_week)
                .order_by(SQL('date_trunc').asc()))

    if since:
        approved = approved.where(Comment.created >= since)
        rejected = rejected.where(RejectedComment.created >= since)

    approved = approved.tuples()
    rejected = rejected.tuples()

    # The overall range spans both result sets; assumes each query
    # returned at least one row.
    first_week = min(approved[0][0], rejected[0][0])
    last_week = max(approved[-1][0], rejected[-1][0])

    weeks = get_week_or_month_counter(metric='week',
                                      first_metric_value=first_week,
                                      last_metric_value=last_week)
    return merge_approved_rejected_list(metric_counter=weeks,
                                        approved=approved,
                                        rejected=rejected)
示例#2
0
def insert_media():
    """Aggregate per-(source, keyword) news statistics and bulk-insert the
    result into media_keyword_group.

    NOTE: Python 2 code (uses print statements).
    """
    # Join news articles to their keyword groups and aggregate share/view
    # counters and sentiment scores per (source, keyword) pair.
    # The literal 'itri' is lowercased server-side to fill the username
    # column with a constant — presumably a service account; verify.
    data = webapp_news_main\
        .select(webapp_news_main.source,\
                webapp_keyword.keyword,\
                fn.count(1),\
                fn.sum(webapp_news_main.share_count).alias('share_count'),\
                fn.sum(webapp_news_main.view_count).alias('view_count'),\
                fn.sum(webapp_news_main.positive).alias('positive'),\
                fn.sum(webapp_news_main.negative).alias('negative'),\
                fn.Lower('itri').alias('username')\
                )\
        .join(webapp_keyword_match,\
                on=(webapp_news_main.url == webapp_keyword_match.guid))\
        .join(webapp_keyword,\
                on=(webapp_keyword_match.kwgrp == webapp_keyword.kwgrp))\
        .where(webapp_keyword_match.kwgrp << (34,3,4,5,6,42,2,8,18,19,20,9),\
               ~(webapp_news_main.source % 'Match%'))\
        .group_by(webapp_news_main.source, webapp_keyword.keyword)\
        .order_by(webapp_news_main.source, webapp_keyword.keyword)
    print data

    # INSERT ... SELECT: stream the aggregate query straight into the
    # media_keyword_group table without materializing rows in Python.
    query = (media_keyword_group\
             .insert_from(\
                 fields=[media_keyword_group.source,\
                         media_keyword_group.keyword,\
                         media_keyword_group.count,\
                         media_keyword_group.share_count,\
                         media_keyword_group.view_count,\
                         media_keyword_group.positive,\
                         media_keyword_group.negative,\
                         media_keyword_group.username,\
                         ],\
                 query=data)\
             .execute())
    print query
示例#3
0
def counts(out_file):

    """
    Update the institutions CSV with the number of docs in the OSP corpus.

    Args:
        out_file: writable file object the CSV is emitted to.
    """

    writer = csv.DictWriter(out_file, ['name', 'url', 'count'])
    writer.writeheader()

    doc_count = fn.count(Institution_Document.id)

    query = (
        Institution
        .select(Institution, doc_count)
        .join(Institution_Document)
        .group_by(Institution.id)
        .order_by(doc_count.desc())
    )

    # One CSV row per institution, most-documented first.
    for inst in query:
        writer.writerow({
            'name': inst.name,
            'url': inst.url,
            'count': inst.count,
        })
示例#4
0
def msg_daily(bot, job):
    """Send a daily activity report for the last 24 hours to job.context.

    Reports the total message count and the ten most active users.

    Args:
        bot: telegram bot used to send the message.
        job: scheduled job; job.context holds the target chat id.
    """
    # Window: the 24 hours ending 6 hours before now (fixed offset).
    today = datetime.today() - timedelta(hours=6)
    yesterday = today - timedelta(days=1)

    Connection.db_connect()

    total_daily = Message.select().where(Message.date > yesterday,
                                         Message.date < today)
    msg = 'Reporte últimas 24hrs\n\n'\
          f'Mensajes\n └ Totales: {total_daily.count()}\n'

    # BUG FIX: the original applied limit(10) without any order_by, so the
    # "top ten" rows were arbitrary; order by message count, descending.
    top_ten = (Message.select(
        Message.user,
        fn.count(Message.user).alias('num_messages')).where(
            Message.date > yesterday,
            Message.date < today).group_by(Message.user).order_by(
                fn.count(Message.user).desc()).limit(10))

    tops = ''
    for rank, i in enumerate(top_ten, start=1):
        tops += f'TOP {rank}: \n'\
                f' ├alias: @{i.user.username} \n'\
                f' ├nombre: {i.user.first_name}\n'\
                f' └enviados: {i.num_messages}\n'

    bot.send_message(job.context, text=msg + tops)
    Connection.db_close()
示例#5
0
def contract_finish(body):
    """Roll a finished contract's totals into team and user statistics.

    Args:
        body: event payload dict; must contain "contract_id", otherwise the
            call is a no-op.
    """
    if "contract_id" not in body:
        return
    contract = Contract.select().where(Contract.id == body["contract_id"]).first()
    if not contract:
        return

    # Hoisted out of the two branches below so it is computed once.
    # BUG FIX: scalar() returns None when the contract has no week stones,
    # which crashed the "+=" below; default to 0.
    time_count = 0
    if contract.hourly > 0:
        time_count = (WeekStone
                      .select(fn.sum(WeekStone.shot_times))
                      .where(WeekStone.contract == contract)
                      .scalar()) or 0

    ts = TeamStatistics.select().where(TeamStatistics.team == contract.team).first()
    if not ts:
        ts = TeamStatistics()
        ts.user = contract.team.user
        ts.team = contract.team
    ts.total_amount += contract.total_amount
    if contract.hourly > 0:
        ts.hours += time_count
    ts.save()

    us = UserStatistics.select().where(UserStatistics.user == contract.user).first()
    if not us:
        us = UserStatistics()
        us.user = contract.user
    us.total_amount += contract.total_amount
    if contract.hourly > 0:
        us.hours += time_count
    us.coop_success += 1

    # Number of distinct teams this user has finished >= 2 contracts with.
    coop_two_count = (Contract
                      .select()
                      .where(Contract.user == contract.user,
                             Contract.status == "finish")
                      .group_by(Contract.team)
                      .having(fn.count(Contract.team) >= 2)
                      .count())
    us.coop_two = coop_two_count
    us.save()
示例#6
0
def get_monthly_top_commented_articles(top=10, since=None):
    """Return the most-commented articles for each month.

    Args:
        top: maximum number of articles reported per month.
        since: optional lower bound on Comment.created.

    Returns:
        Whatever fill_output_with_default_values produces from a list of
        (month, [(article_url, comment_count), ...]) pairs, with months
        that have no comments filled with an empty list.
    """
    # First collect the distinct months that have any comments at all.
    months = Comment.select(fn.date_trunc('month', Comment.created))
    if since:
        months = months.where(Comment.created >= since)
    months = months.group_by(fn.date_trunc('month', Comment.created)).order_by(
        SQL('date_trunc').asc()).tuples()
    month_top_commented_articles_map = {m[0]: [] for m in months}
    # Maps a (month, asset_id, count) tuple to (article_url, count).
    output_formatter = lambda mtc: (Asset.get_by_id(mtc[1]).url, mtc[2])
    # One query per month: the `top` most-commented assets of that month.
    for month in months:
        month_top_commented_articles_map[month[0]].extend(
            list(
                map(
                    output_formatter,
                    Comment.select(
                        fn.date_trunc('month',
                                      Comment.created), Comment.asset_id,
                        fn.count(Comment.id)).group_by(
                            fn.date_trunc('month', Comment.created),
                            Comment.asset_id).where(
                                fn.date_trunc('month', Comment.created) ==
                                month).order_by((SQL('date_trunc')),
                                                (SQL('count')).desc()).limit(
                                                    int(top)).tuples())))
    # Assumes at least one month exists — raises ValueError on empty data.
    first_month = min(month_top_commented_articles_map.keys())
    last_month = max(month_top_commented_articles_map.keys())
    months = get_week_or_month_counter(metric='month',
                                       first_metric_value=first_month,
                                       last_metric_value=last_month)
    monthly_top_commented_articles = list(
        month_top_commented_articles_map.items())
    ret = fill_output_with_default_values(
        metric_counter=months,
        output=monthly_top_commented_articles,
        default_value=[])
    return ret
示例#7
0
    def add_edges(self, max_texts=20):
        """
        For each syllabus, register citation pairs as edges.

        Args:
            max_texts (int): Ignore docs with > than N citations.
        """

        text_ids = fn.array_agg(Text.id).coerce(False).alias('text_ids')

        docs = (
            Citation
            .select(Citation.document, text_ids)
            .join(Text)
            .having(fn.count(Text.id) <= max_texts)
            .where(Text.display == True)
            .where(Text.valid == True)
            .group_by(Citation.document)
        )

        # Every unordered pair of co-cited texts becomes one edge; repeated
        # co-citations accumulate as edge weight.
        for row in query_bar(docs):
            for a, b in combinations(row.text_ids, 2):
                if self.graph.has_edge(a, b):
                    self.graph[a][b]['weight'] += 1
                else:
                    self.graph.add_edge(a, b, weight=1)
示例#8
0
文件: network.py 项目: overview/osp
    def add_edges(self, max_citations=50):

        """
        For each syllabus, register citation pairs as edges.

        Args:
            max_citations (int): Discard documents with > N citations.
        """

        # Aggregate the CNs.
        texts = fn.array_agg(HLOM_Record.control_number).coerce(False).alias('texts')

        # Select syllabi and cited CNs.
        documents = (
            HLOM_Citation
            .select(HLOM_Citation.document, texts)
            .join(HLOM_Record)
            .having(fn.count(HLOM_Record.id) <= max_citations)
            .distinct(HLOM_Citation.document)
            .group_by(HLOM_Citation.document)
        )

        # Each unordered pair of co-cited control numbers becomes an edge;
        # repeat co-citations accumulate as edge weight.
        for row in query_bar(documents):
            for left, right in combinations(row.texts, 2):
                if self.graph.has_edge(left, right):
                    # Edge already exists: +1 the weight.
                    self.graph[left][right]['weight'] += 1
                else:
                    # First co-citation: initialize the edge.
                    self.graph.add_edge(left, right, weight=1)
示例#9
0
def messages_by_tags(tags, page):
    """Return messages carrying *every* tag in ``tags``.

    NOTE(review): the ``page`` argument is accepted but never applied here —
    confirm whether pagination is meant to happen in the caller.
    """
    tag_ids = [tag.id for tag in tags]
    # Message ids that match all requested tags (count == len(tags)).
    matching_ids = (MessageTags
                    .select(MessageTags.message_id)
                    .where(MessageTags.tag_id << tag_ids)
                    .group_by(MessageTags.message_id)
                    .having(fn.count(MessageTags.tag_id) == len(tags)))
    return Message.select(Message).where(Message.id << matching_ids)
示例#10
0
    def CountNoReadPush(self, request, context):
        """RPC handler: count unread push notifications for the caller.

        Args:
            request: the (unused) RPC request message.
            context: call context; context.login_email identifies the user.

        Returns:
            CountNoReadPushResponse with the unread count and a result code
            (SUCCESS on success, ERROR with the exception text on failure).
        """
        count = 0

        # Defaults reported if the query below never runs to completion.
        result_code = ResultCode.UNKNOWN_RESULT_CODE
        result_message = "Unknown count no read push Result"

        db = pwdb.database

        with db.atomic() as transaction:
            try:
                # Unread (= not-yet-read) pushes addressed to this user.
                query = (PushLog.select(fn.count(
                    PushLog.id).alias('count')).where(
                        (PushLog.receiver_email == context.login_email)
                        & (PushLog.is_read == False)))

                # Aggregate query yields a single row holding the count.
                for row in query:
                    count = row.count

                result_code = ResultCode.SUCCESS
                result_message = "count no read success"

            except Exception as e:
                transaction.rollback()
                result_code = ResultCode.ERROR
                result_message = str(e)
                print("EXCEPTION: " + str(e))

        return CountNoReadPushResponse(
            result=CommonResult(
                result_code=result_code,
                message=result_message,
            ),
            count=count,
        )
示例#11
0
def get_featured_comments():
    """Summarize editor's-pick comments.

    Returns:
        {'count': total_featured, 'commenters': [(name, count), ...]}.
    """
    rows = (Comment
            .select(Comment.commenter, fn.count(Comment.id))
            .group_by(Comment.commenter)
            .where(Comment.editors_pick == True)
            .tuples())
    total = sum(c for _, c in rows)
    commenters = [(commenter['name'], c) for commenter, c in rows]
    return {'count': total, 'commenters': commenters}
示例#12
0
def get_last2days_top_commented_articles(top=10):
    """Return [(article_url, comment_count)] for the most-commented assets
    of the last two days, highest count first."""
    cutoff = arrow.utcnow().shift(days=-int(2)).span('day')[0].date()
    query = (Comment
             .select(Comment.asset_id, fn.count(Comment.id))
             .group_by(Comment.asset_id)
             .where(Comment.created >= cutoff)
             .order_by(SQL('count').desc())
             .limit(int(top)))
    return [(Asset.get_by_id(asset_id).url, count)
            for asset_id, count in query.tuples()]
示例#13
0
def get_yearly_unique_commenters_count():
    """Return [(year_iso_date, distinct_commenter_count)] in year order."""
    year = fn.date_trunc('year', Comment.created)
    query = (Comment
             .select(year, fn.count(fn.Distinct(Comment.commenter_id)))
             .group_by(year)
             .order_by(SQL('date_trunc').asc()))
    return [(bucket.date().isoformat(), total)
            for bucket, total in query.tuples()]
示例#14
0
    def get_year_by_counts(cls) -> List[Tuple[int, int]]:
        """Return (year, row_count) pairs in ascending year order."""
        fn_year = fn.strftime('%Y', cls.date).cast('INTEGER')
        rows = (cls
                .select(fn_year.alias('year'),
                        fn.count(cls.id).alias('count'))
                .group_by(fn_year)
                .order_by(fn_year))
        return [(r.year, r.count) for r in rows]
示例#15
0
 def entry_histogram(self):
     """Return (day_timestamp, first_entry_id, entry_count) tuples — the
     number of entries per day."""
     day = fn.date(Entry.created_at)
     rows = (Entry
             .select(day.alias("date"),
                     fn.min(Entry.id).alias("id"),
                     fn.count(Entry.id).alias("count"))
             .group_by(day)
             .order_by(day))
     return [(r.date.timestamp(), r.id, r.count) for r in rows]
示例#16
0
def get_curr_year_top_commenters(top=3):
    """Return [(commenter_name, comment_count)] for the current year's
    most prolific commenters, limited to ``top`` entries."""
    year = fn.date_trunc('year', Comment.created)
    year_start = arrow.utcnow().span('year')[0].date()
    query = (Comment
             .select(year, Comment.commenter, fn.count(Comment.id))
             .group_by(year, Comment.commenter)
             .where(Comment.created >= year_start)
             .order_by(SQL('count').desc())
             .limit(int(top)))
    return [(commenter['name'], count)
            for _, commenter, count in query.tuples()]
示例#17
0
def get_hourly_comments_count(since=None):
    """Return [(hour_of_day, comment_count)] ordered by hour.

    Args:
        since: optional lower bound on Comment.created.
    """
    hour = fn.date_part('hour', Comment.created)
    query = (Comment
             .select(hour, fn.count(Comment.id))
             .group_by(hour)
             .order_by(SQL('date_part').asc()))
    if since:
        query = query.where(Comment.created >= since)
    return list(query.tuples())
示例#18
0
文件: alert_push.py 项目: ywchiu/dmap
def alert_news(difftime, criteria, count_by):
    """Collect pushed news URLs whose push count within the look-back window
    reaches the alert threshold.

    Args:
        difftime: look-back window, in minutes.
        criteria: minimum count for a URL to trigger an alert.
        count_by: counting mode; only 'comments' is handled.

    Returns:
        {url: {'counts': n, 'title': str, 'date': str}} for alerting URLs,
        or None when nothing matched.
    """
    criteria = int(criteria)
    difftime = int(difftime) * 60  # minutes -> seconds
    totm = datetime.now().strftime('%s')
    todt = datetime.fromtimestamp(int(totm), FROMTZ).strftime('%Y-%m-%d %H:%M:%S')
    fromtm = int(totm) - difftime
    fromdt = datetime.fromtimestamp(fromtm, FROMTZ).strftime('%Y-%m-%d %H:%M:%S')

    # Pushes joined to their article, counted per URL within the window.
    wnp = (webapp_news_push
           .select(webapp_news_push.url,
                   webapp_news_main.title,
                   webapp_news_main.date,
                   fn.count(1).alias('counts'))
           .join(webapp_news_main,
                 on=(webapp_news_push.url == webapp_news_main.url))
           .where(webapp_news_main.tm > fromtm,
                  webapp_news_push.tm > fromtm)
           .group_by(webapp_news_push.url,
                     webapp_news_main.title,
                     webapp_news_main.date)
           .naive())

    dic = {}
    alert_urls = set()
    for w in wnp:
        dic[w.url] = {
            'counts': w.counts,
            'title': w.title,
            'date': fromdt + ' 至 ' + todt,
        }
        # BUG FIX: the original assigned ``cnt`` only when count_by was
        # 'comments', so any other mode raised NameError on the first row
        # (or silently reused a stale value). Unknown modes now never alert.
        if count_by != 'comments':
            continue
        if dic[w.url]['counts'] >= criteria:
            alert_urls.add(w.url)

    if not alert_urls:
        return None
    return {url: dic[url] for url in alert_urls}
示例#19
0
 def query_pan_acc_count_by_acc_id(cls, acc_id):
     """Return the number of PanAccounts rows owned by ``acc_id``, or 0.

     Args:
         acc_id: the owning user's id.
     """
     # FIX: the original attached .alias('count') to the whole query (a
     # table alias) rather than the aggregate column; alias the count
     # expression itself, consistent with query_assets_count_by_ref_id.
     model_rs: ModelSelect = PanAccounts.select(
         fn.count(PanAccounts.id).alias('count')).where(
             PanAccounts.user_id == acc_id)
     if model_rs:
         model_dict = model_rs.dicts()
         if model_dict:
             v = model_dict[0].get('count')
             if v:
                 return v
     return 0
示例#20
0
    def get_year_by_number(cls) -> List[Tuple[int, int]]:
        """Return (year, finished_game_count) pairs, newest year first.

        Games without a finish timestamp or flagged as ignored are skipped.
        """
        fn_year = fn.strftime('%Y', cls.finish_datetime).cast('INTEGER')
        query = (cls
                 .select(fn_year.alias('year'),
                         fn.count(cls.id).alias('count'))
                 .where(cls.finish_datetime.is_null(False),
                        cls.ignored == 0)
                 .group_by(fn_year)
                 .order_by(fn_year.desc()))
        return [(game.year, game.count) for game in query]
示例#21
0
文件: tree.py 项目: djirik/mkb10
def get_group(block):
    """Return JSON for the direct children of ``block``, each annotated
    with ``has_subgroup`` (True when the node has children of its own)."""
    Child = MKB10.alias()
    qs = (MKB10
          .select(MKB10.id, MKB10.code, MKB10.name,
                  (fn.count(Child.id) > 0).alias('ct'))
          .join(Child, JOIN.LEFT_OUTER, on=(Child.parent == MKB10.id))
          .where(MKB10.parent == block)
          .group_by(MKB10.id)
          .order_by(MKB10.code))
    qs = actual_filter(qs)

    def to_node(row):
        # Swap the raw 'ct' aggregate for a boolean 'has_subgroup' flag.
        row['has_subgroup'] = bool(row.pop('ct'))
        return row

    return jsonify(tuple(to_node(row) for row in qs.dicts()))
示例#22
0
文件: banks.py 项目: baadjis/bankr
def get_bank_users(bank_name=None):
    """Return the number of distinct users per bank.

    Args:
        bank_name: optional bank to restrict the result to.

    Returns:
        [{"name": bank_name, "number of users": n}, ...]

    Raises:
        BankNotFoundError: when ``bank_name`` is given but unknown.
    """
    nb_users = (User.select(fn.count(fn.Distinct(User.id)), Bank.name).join(
        Account, on=(User.id == Account.user_id)).join(Bank).group_by(Bank))

    if bank_name is not None:
        # Validate the bank exists before filtering.
        # (Removed a leftover debug print of the query from the original.)
        if Bank.get_or_none(name=bank_name) is None:
            raise BankNotFoundError(bank_name)
        nb_users = nb_users.where(Bank.name == bank_name)

    return [{
        "name": nb_user.account.bank.name,
        "number of users": nb_user.count
    } for nb_user in nb_users]
示例#23
0
 def query_assets_count_by_ref_id(cls, ref_id, pin=None):
     """Return the number of Assets rows for ``ref_id``, or 0.

     Args:
         ref_id: reference id to count assets for.
         pin: optional pin; when given, only rows matching both filters count.
     """
     model_rs: ModelSelect = Assets.select(
         fn.count(Assets.id).alias('count'))
     if pin is None:
         model_rs = model_rs.where(Assets.ref_id == ref_id)
     else:
         # BUG FIX: the original combined the two conditions with Python's
         # ``and``, which evaluates to only the second expression and
         # silently dropped the ref_id filter; peewee requires ``&``.
         model_rs = model_rs.where((Assets.ref_id == ref_id)
                                   & (Assets.pin == pin))
     # print("query_assets_count_by_ref_id sql:", model_rs)
     if model_rs:
         model_dict = model_rs.dicts()
         if model_dict:
             v = model_dict[0].get('count')
             if v:
                 return v
     return 0
def show():
    # https://<url>/api/v1/timeslots/show?d=<d>&m=<m>&y=<y>
    """Return the start times of fully-booked (12-image) orders on the
    requested day as {'slotsTaken': [...]}."""
    selected_day = datetime(int(request.args.get('y')),
                            int(request.args.get('m')),
                            int(request.args.get('d')))

    # Orders on that day that already contain exactly 12 images.
    orders = (Order
              .select()
              .join(Image)
              .where(fn.date_trunc('day', Order.start_time) == selected_day)
              .group_by(Order.id)
              .having(fn.count(Image.id) == 12))

    return jsonify({'slotsTaken': [o.start_time for o in orders]})
示例#25
0
def get_pending_comments_by_asset(since=None):
    """Summarize pending comments grouped by asset.

    Args:
        since: optional lower bound on PendingComment.created.

    Returns:
        {'total_pending': int,
         'pending_comments_by_asset': [(asset_url, count), ...]}
    """
    query = (PendingComment
             .select(PendingComment.asset_id, fn.count(PendingComment.id))
             .group_by(PendingComment.asset_id))
    if since:
        query = query.where(PendingComment.created >= since)
    # PERF FIX: materialize once — the original called .tuples() twice,
    # executing the query against the database twice.
    rows = list(query.tuples())
    total_pending = sum(count for _, count in rows)
    by_asset = [(Asset.get_by_id(asset_id).url, count)
                for asset_id, count in rows]
    return {
        'total_pending': total_pending,
        'pending_comments_by_asset': by_asset
    }
示例#26
0
def get_monthly_unique_commenters_count(since=None):
    """Return distinct-commenter counts per month, with gap months filled
    with default values.

    Args:
        since: optional lower bound on Comment.created.
    """
    monthly_commenters_count = Comment.select(
        fn.date_trunc('month', Comment.created),
        fn.count(fn.Distinct(Comment.commenter_id))).group_by(
            fn.date_trunc('month',
                          Comment.created)).order_by(SQL('date_trunc').asc())
    if since:
        monthly_commenters_count = monthly_commenters_count.where(
            Comment.created >= since)
    monthly_commenters_count = monthly_commenters_count.tuples()
    # NOTE(review): raises IndexError when no comments exist at all —
    # confirm callers guarantee a non-empty table.
    first_month = monthly_commenters_count[0][0]
    last_month = monthly_commenters_count[-1][0]
    months = get_week_or_month_counter(metric='month',
                                       first_metric_value=first_month,
                                       last_metric_value=last_month)
    ret = fill_output_with_default_values(metric_counter=months,
                                          output=monthly_commenters_count)
    return ret
def get_free_timeslots_between(db: directives.PeeweeSession, start: datetime, end: datetime):
    """Return start times of time slots in [start, end] that still have
    unbooked, unclaimed (or claim-expired) appointments."""
    with db.atomic():
        now = datetime.now(tz=config.Settings.tz).replace(tzinfo=None)
        # An appointment counts as free when it was never claimed, or its
        # claim has outlived the configured timeout.
        claim_free = (Appointment.claim_token.is_null()
                      | (Appointment.claimed_at
                         + timedelta(minutes=config.Settings.claim_timeout_min) < now))
        slots = (TimeSlot
                 .select(TimeSlot.start_date_time, TimeSlot.length_min,
                         fn.count(Appointment.time_slot).alias("free_appointments"))
                 .join(Appointment)
                 .where((TimeSlot.start_date_time >= start)
                        & (TimeSlot.start_date_time <= end)
                        & claim_free
                        & (Appointment.booked == False))
                 .group_by(TimeSlot.start_date_time, TimeSlot.length_min)
                 .order_by(TimeSlot.start_date_time))
        return [{"startDateTime": str(slot.start_date_time)} for slot in slots]
示例#28
0
def next_free_slots(db: PeeweeSession,
                    user: hug.directives.user,
                    at_datetime: hug.types.text = None):
    """Return the next bookable time slots (plus the user's coupons).

    Args:
        db: request-scoped database session.
        user: authenticated user (source of the coupons in the response).
        at_datetime: optional ISO timestamp overriding "now" (for testing).

    Equivalent SQL:

    SELECT t.start_date_time, count(a.time_slot_id)
FROM appointment a
         JOIN timeslot t ON a.time_slot_id = t.id
WHERE a.booked IS false
  AND a.claim_token ISNULL
  AND t.start_date_time > NOW()
GROUP BY t.start_date_time
ORDER BY t.start_date_time
    """
    with db.atomic():
        # @formatter:off
        if at_datetime is not None:
            now = datetime.fromisoformat(at_datetime).replace(tzinfo=None)
        else:
            now = datetime.now(tz=config.Settings.tz).replace(tzinfo=None)
        # Future slots with appointments that are unbooked and either never
        # claimed or whose claim exceeded the configured timeout.
        slots = TimeSlot \
            .select(TimeSlot.start_date_time, TimeSlot.length_min,
                    fn.count(Appointment.time_slot).alias("free_appointments")) \
            .join(Appointment) \
            .where(
                (TimeSlot.start_date_time > now) &
                (Appointment.claim_token.is_null() | (Appointment.claimed_at +
                                                      timedelta(
                                                          minutes=config.Settings.claim_timeout_min) < now)) &
                (Appointment.booked == False)
            ) \
            .group_by(TimeSlot.start_date_time, TimeSlot.length_min) \
            .order_by(TimeSlot.start_date_time) \
            .limit(config.Settings.num_display_slots)
        # @formatter:on
        return {
            "slots": [{
                "startDateTime": slot.start_date_time,
                "freeAppointments": slot.free_appointments,
                "timeSlotLength": slot.length_min
            } for slot in slots],
            "coupons":
            user.coupons
        }
示例#29
0
def get_rejected_comments(since=None):
    """Summarize rejected comments per rejection reason.

    Args:
        since: optional lower bound on RejectedComment.created.

    Returns:
        {reason_name: {'count': int, 'commenter': [(name, count), ...]}}
    """
    rejected_comments = {i.name: {} for i in rejection_reasons}
    rejected = RejectedComment.select(RejectedComment.reason,
                                      RejectedComment.commenter,
                                      fn.count(RejectedComment.id)).group_by(
                                          RejectedComment.reason,
                                          RejectedComment.commenter)
    if since:
        rejected = rejected.where(RejectedComment.created >= since)
    # PERF FIX: fetch once — the original re-ran rejected.tuples() twice
    # per rejection reason, hammering the database.
    rows = list(rejected.tuples())
    for name in rejected_comments:
        reason_value = rejection_reasons[name].value
        matching = [(commenter, count)
                    for reason, commenter, count in rows
                    if reason == reason_value]
        rejected_comments[name]['count'] = sum(c for _, c in matching)
        rejected_comments[name]['commenter'] = [(commenter['name'], c)
                                                for commenter, c in matching]
    return rejected_comments
示例#30
0
def fuzz(out_file, min_count):

    """
    Write a CSV with title and fuzz.

    Args:
        out_file: writable file object the CSV is emitted to.
        min_count: only texts cited more than this many times are included.
    """

    writer = csv.DictWriter(out_file, [
        'text_id',
        'count',
        'fuzz',
        'surname',
        'title',
    ])
    writer.writeheader()

    cite_count = fn.count(Citation.id)

    query = (
        Text
        .select(Text, cite_count)
        .join(Citation)
        .where(Text.display == True)
        .having(cite_count > min_count)
        .group_by(Text.id)
        .naive()
    )

    # Emit rows sorted by fuzz, high -> low.
    for text in sorted(list(query), key=lambda t: t.fuzz, reverse=True):
        writer.writerow({
            'text_id': text.id,
            'count': text.count,
            'fuzz': text.fuzz,
            'surname': text.surname,
            'title': text.title,
        })
    def add_edges(self, max_texts=20):

        """
        For each syllabus, register citation pairs as edges.

        Each unordered pair of texts cited by the same document becomes one
        graph edge; repeated co-citations accumulate as edge weight.

        Args:
            max_texts (int): Ignore docs with > than N citations.
        """

        # Collect all cited text ids per document into one array column.
        text_ids = (
            fn.array_agg(Text.id)
            .coerce(False)
            .alias('text_ids')
        )

        # One row per document, restricted to displayable, valid texts and
        # documents citing at most max_texts of them.
        docs = (
            Citation
            .select(Citation.document, text_ids)
            .join(Text)
            .having(fn.count(Text.id) <= max_texts)
            .where(Text.display==True)
            .where(Text.valid==True)
            .group_by(Citation.document)
        )

        for row in query_bar(docs):
            for tid1, tid2 in combinations(row.text_ids, 2):

                # If the edge exists, increment the weight.

                if self.graph.has_edge(tid1, tid2):
                    self.graph[tid1][tid2]['weight'] += 1

                # Otherwise, initialize the edge.

                else:
                    self.graph.add_edge(tid1, tid2, weight=1)
示例#32
0
def lookup_icd():
    """Search actual ICD entries by code prefix or name substring.

    Query params:
        q: required search term. A leading latin letter triggers a code
           prefix match; anything else a case-insensitive name search.
        limit: optional result cap, default 50.

    Returns:
        JSON list of {name, code} dicts, or an error payload when q is
        missing.
    """
    q = request.args.get('q')
    if not q:
        return jsonify(err='bad_param', msg='missing required parameter: q')

    # gets parents of subgroups for exclude from results
    subq = (MKB10.select(MKB10.parent).where(MKB10.actual == True).where(
        MKB10.code.is_null(False)).group_by(
            MKB10.parent).having(fn.count(MKB10.parent) > 0).alias('subq'))

    qs = (MKB10.select(MKB10.name,
                       MKB10.code).where(MKB10.actual == True).where(
                           MKB10.code.is_null(False)).where(
                               MKB10.id.not_in(subq)))

    if re.match('[a-z]', q[0], re.IGNORECASE):
        qs = qs.where(MKB10.code.startswith(q))
    else:
        # NOTE(review): fn.lower_case generates a LOWER_CASE() SQL call —
        # presumably a custom DB function; verify it is not a typo for
        # fn.lower.
        qs = qs.where(fn.lower_case(MKB10.name).contains(q.lower()))

    limit = request.args.get('limit', '50')
    qs = qs.limit(int(limit) if limit.isdigit() else 50)

    return jsonify(tuple(qs.dicts()))
示例#33
0
	def updateCache_MachinePart(self):
		"""
		Update the cache regarding machine parts.

		For every machine part, stores in the user's "partcounters" state:
		total stage count, how many stages the user unlocked, and whether
		at least one more stage is currently affordable (0 or 1).
		"""

		# Count levels per machine part
		# NOTE(review): partCounters is assigned but never used below.
		partCounters = {}

		# Select all machine parts
		parts = { }
		for p in MachinePart.select(
				MachinePart,
				fn.Count(MachinePartStage.id).alias("total"),
			) \
			.group_by( MachinePart.id ) \
			.join( MachinePartStage ):

			# Get part IDs
			p_ids = []

			# Get all unlocked parts
			for up in MachinePartStageUnlock \
				.select(
					MachinePartStageUnlock.id,
					MachinePartStageUnlock.stage,
					MachinePartStageUnlock.user,
					MachinePartStage.id.alias("stage_part_id")
				 ) \
				.join( MachinePartStage ) \
				.where(
					  (MachinePartStage.part == p)
					& (MachinePartStageUnlock.user == self.dbUser)
				):

				# Collect IDs
				p_ids.append(up.id)

			# Count unlockable parts: stages of this part the user can
			# afford, excluding those already unlocked above.
			whereQuery = (MachinePartStage.part == p) \
					  &  (MachinePartStage.cost <= self.dbUser.points)
			if len(p_ids) > 0:
				whereQuery &= ~(MachinePartStage.id << p_ids)
			unlockable = MachinePartStage.select(
					fn.count( MachinePartStage.id )
				)\
				.where(whereQuery) \
				.limit(1) \
				.scalar()

			# Unlockable is at max 1 (exposed as a boolean-like flag)
			if unlockable > 1:
				unlockable = 1

			# Update total
			parts[p.name] = {
				"total": p.total,
				"unlocked": len(p_ids),
				"unlockable": unlockable
			}


		# Update state
		self.dbUser.setState("partcounters", parts )
	def clientsXOrders():
		"""Return a query of (name, cpf, total_orders) per client, ordered
		by order count, highest first."""
		order_total = fn.count(Order.id)
		return (Person
			.select(Person.name, Person.cpf, order_total.alias('total_orders'))
			.join(Order, on=(Person.id == Order.client))
			.group_by(Person.id)
			.order_by(order_total.desc()))
示例#35
0
    def type_stats(self):
        """Respond with {alarm_type: count} over new and unsolved alarms."""
        active = Alarm.status.in_([AlarmStatus.new, AlarmStatus.unsolved])
        query = (Alarm
                 .select(Alarm.type, fn.count(Alarm.type).alias('count'))
                 .where(active)
                 .group_by(Alarm.type))
        return self.response({row.type: int(row.count) for row in query})
示例#36
0
 def getMaxDimSize(self):
     """Return the total number of Tags rows, caching it on first use."""
     if self.maxDimSize is not None:
         return self.maxDimSize
     self.maxDimSize = Tags.select(fn.count(Tags.id)).scalar()
     return self.maxDimSize
示例#37
0
	def get_tagcloud(self):
		"""Return tags with per-name usage counts for building a tag cloud."""
		usage = fn.count(Tag.name).alias('count')
		return Tag.select(Tag, usage).group_by(Tag.name)
        1 for modinf in info['mod_info']
        if modinf.state in {TURF_LOOKUP['lead'], TURF_LOOKUP['backup']})

    return render_template(
        "turf_row.html", show=show, info=info, modid=modid, modname=modname,
        hi_post_thresh=hi_post_thresh, parity=parity,
        TURF_LOOKUP=TURF_LOOKUP)


################################################################################
### Turfs CSV dump

turfs_query = Show.select(
    Show,
    Turf
    .select(fn.count(SQL('*')))
    .where((Turf.state == TURF_LOOKUP['lead']) & (Turf.show == Show.id))
    .alias('leadcount'),
    Turf
    .select(fn.count(SQL('*')))
    .where((Turf.state == TURF_LOOKUP['backup']) & (Turf.show == Show.id))
    .alias('helpercount'),
    Turf
    .select(fn.group_concat(NodeList((Mod.name, SQL("SEPARATOR ', '")))))
    .join(Mod)
    .where((Turf.show == Show.id) & (Turf.state == TURF_LOOKUP['lead']))
    .alias('leads'),
    Turf
    .select(fn.group_concat(NodeList((Mod.name, SQL("SEPARATOR ', '")))))
    .join(Mod)
    .where((Turf.show == Show.id) & (Turf.state == TURF_LOOKUP['backup']))