Example #1
def user_page(user_name):
    # Munge the user_name, and hope
    user_name = user_name.replace('_', ' ').lower()
    user = g.conn.session.query(User).filter(func.lower(User.username) == user_name).one()
    stats = {
        'query_count': g.conn.session.query(func.count(Query.id)).filter(Query.user_id == user.id).scalar(),
        'stars_count': g.conn.session.query(func.count(Star.id)).filter(Star.user_id == user.id).scalar()
    }
    draft_queries = g.conn.session.query(Query) \
        .filter(Query.user_id == user.id) \
        .filter_by(published=False) \
        .order_by(desc(Query.last_touched)) \
        .limit(10)
    published_queries = g.conn.session.query(Query)\
        .filter(Query.user_id == user.id)\
        .filter_by(published=True)\
        .order_by(desc(Query.last_touched))\
        .limit(10)
    stars = g.conn.session.query(Star).join(Star.query) \
        .options(joinedload(Star.query))\
        .filter(Star.user_id == user.id) \
        .order_by(desc(Star.timestamp))\
        .limit(10)
    return render_template(
        "user.html",
        display_user=user,
        user=get_user(),
        stats=stats,
        draft_queries=draft_queries,
        published_queries=published_queries,
        stars=stars
    )
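A note on the lookup above: Query.one() raises NoResultFound when no user matches the munged name. A minimal defensive variant (a sketch only, assuming the same User model and session, plus a Flask-style abort) would be:

user = g.conn.session.query(User) \
    .filter(func.lower(User.username) == user_name) \
    .one_or_none()
if user is None:
    abort(404)  # hypothetical handler; the original simply lets the exception propagate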
Example #2
    def get_activitychart_data(self):
        days = [date(2012, 12, 17) + timedelta(weeks=w) for w in range(52 * 4)]

        proposals_by_day = group_by_week(
            db.session.query(
                Proposal.date,
                func.count('*'),
            )
            .join(Proposal.sponsorships)
            .filter(Sponsorship.mandate_id == self.mandate.id)
            .filter(Proposal.date >= days[0])
            .group_by(Proposal.date)
        )

        questions_by_day = group_by_week(
            db.session.query(
                Question.date,
                func.count('*'),
            )
            .join(Question.asked)
            .filter(Ask.mandate_id == self.mandate.id)
            .filter(Question.date >= days[0])
            .group_by(Question.date)
        )

        series = []
        for day in days:
            series.append({
                'date': day,
                'proposals': proposals_by_day.get(day, 0),
                'questions': questions_by_day.get(day, 0),
                'vacation': any(d0 <= day < d1 for d0, d1 in VACATIONS),
            })

        return series
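This example leans on a group_by_week helper that is not shown. A plausible minimal implementation (an assumption, not the project's actual code) buckets each (date, count) row into its Monday-based week, matching the weekly grid of days built above (date(2012, 12, 17) is a Monday):

from datetime import timedelta

def group_by_week(rows):
    # Map each (day, count) row onto the Monday that starts its week.
    out = {}
    for day, count in rows:
        monday = day - timedelta(days=day.weekday())
        out[monday] = out.get(monday, 0) + count
    return out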
Example #3
def stats(**kwargs):
    """
    Helper that defines some useful statistics about certifications.

    :param kwargs:
    :return:
    """
    if kwargs.get('metric') == 'not_after':
        start = arrow.utcnow()
        end = start.replace(weeks=+32)
        items = database.db.session.query(Certificate.issuer, func.count(Certificate.id))\
            .group_by(Certificate.issuer)\
            .filter(Certificate.not_after <= end.format('YYYY-MM-DD')) \
            .filter(Certificate.not_after >= start.format('YYYY-MM-DD')).all()

    else:
        attr = getattr(Certificate, kwargs.get('metric'))
        query = database.db.session.query(attr, func.count(attr))

        items = query.group_by(attr).all()

    keys = []
    values = []
    for key, count in items:
        keys.append(key)
        values.append(count)

    return {'labels': keys, 'values': values}
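One caveat for the 'not_after' branch: newer releases of the arrow library moved plural-unit offsets from replace() to shift(), so under a current arrow the 32-week window would be written as:

end = start.shift(weeks=+32)  # equivalent of start.replace(weeks=+32) on older arrow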
Example #4
    def match(self, level, limit=10):
        '''
        Returns a list of UserSkillMatch objects, in descending order of number
        of skills matched for each user.
        '''

        skills_to_learn = [
            s.name for s in
            self.skills if s.level == LEVELS['LEVEL_I_WANT_TO_LEARN']['score']
        ]
        if skills_to_learn:
            matched_users = User.query_in_deployment().\
                            add_column(func.string_agg(UserSkill.name, ',')).\
                            add_column(func.count(UserSkill.id)).\
                            filter(UserSkill.name.in_(skills_to_learn)).\
                            filter(User.id == UserSkill.user_id).\
                            filter(UserSkill.level == level).\
                            filter(UserSkill.user_id != self.id).\
                            group_by(User).\
                            order_by(func.count().desc()).\
                            limit(limit)
        else:
            matched_users = []

        for user, question_ids_by_comma, count in matched_users:
            yield UserSkillMatch(user, question_ids_by_comma.split(','))
Example #5
    def post(self):
        import pluricent as pl
        from sqlalchemy import func, distinct
        import numpy as np
        import json
        args = {}
        p = pl.Pluricent(pl.global_settings()['database'])
        datatypes = [e[0] for e in p.session.query(distinct(pl.models.Processing.datatype)).all()]

        table = []
        headers = ['subject', 't1image']
        q = []
        q.append(dict(
            p.session.query(pl.models.Subject.identifier,
                            func.count(pl.models.T1Image.path))
            .filter(pl.models.T1Image.subject_id == pl.models.Subject.id)
            .group_by(pl.models.Subject.identifier)
            .all()))
        for each in datatypes:
            headers.append(each)
            res = (p.session.query(pl.models.Subject.identifier,
                                   func.count(pl.models.Processing.path))
                   .join(pl.models.T1Image)
                   .filter(pl.models.Processing.input_id == pl.models.T1Image.id)
                   .filter(pl.models.T1Image.subject_id == pl.models.Subject.id)
                   .filter(pl.models.Processing.datatype == each)
                   .group_by(pl.models.Subject.identifier)
                   .all())
            q.append(dict(res))

        subjects = [e[0] for e in p.session.query(pl.models.Subject.identifier).all()]
        table.append(headers)
        for s in subjects:
            table.append([s])
            table[-1].extend([each.get(s, 0) for each in q])

        #print t1images
        args['images'] = table

        res = json.dumps(args)
        self.write(res)

        return None
Example #6
def admin(request):
    authorized_admin(request)
    settings = request.registry.settings
    all = DBSession.query(Application).all()
    unapproved = DBSession.query(func.count(Application.hardware),
                                 Application.hardware) \
                .filter_by(approved=False).group_by(Application.hardware).all()
    approved = DBSession.query(func.count(Application.hardware),
                               Application.hardware) \
                .filter_by(approved=True).group_by(Application.hardware).all()
    hardware = defaultdict(lambda: defaultdict(int))
    selected = []

    for hw in settings['hardware'].split():
        hardware[hw]['num'] = int(settings['num_%s' % hw])
    for num, hw in approved:
        hardware[hw]['approved'] = num
    for num, hw in unapproved:
        hardware[hw]['unapproved'] = num
        entries = DBSession.query(Application) \
                .filter_by(hardware=hw, approved=False).all()
        num_entries = len(entries)
        sample_size = hardware[hw]['num'] - hardware[hw]['approved']
        if sample_size <= 0:
            continue
        if sample_size > num_entries:
            sample_size = num_entries
        selected.extend([entry.id for entry in
            random.sample(entries, sample_size)])

    return {'hardware': hardware, 'applications': all, 'selected': selected}
Example #7
    def test_generic_count(self):
        assert isinstance(func.count().type, sqltypes.Integer)

        self.assert_compile(func.count(), 'count(*)')
        self.assert_compile(func.count(1), 'count(:count_1)')
        c = column('abc')
        self.assert_compile(func.count(c), 'count(abc)')
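The same compile behavior is easy to check interactively, since str() on a SQL function element renders it against the default dialect (a standalone sketch, no models needed):

from sqlalchemy import column, func

print(func.count())               # count(*)
print(func.count(1))              # count(:count_1)
print(func.count(column('abc')))  # count(abc)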
Example #8
def main():
	path = "../data/"

	for root, dirnames, filenames in os.walk(path):
		for filename in fnmatch.filter(filenames, '*.xml'):
			source = os.path.join(root, filename)
			print "Current file is: " + source
			h = Hansard(source)
			h.parse_xml()


	for source in glob.glob(os.path.join(path, '*.xml')):
		print "Current file is: " + source
		h = Hansard(source)
		h.parse_xml()

	global session
#	session.query(func.count(Speaker.speaker_id))
	# _parse_xml_file(source)
	speakers = session.query(func.count(Speaker.speaker_id)).all() # about .01 sec
	votes = session.query(func.count(Vote.id)).all() # about .01 sec
	divisions = session.query(func.count(Division.division_hash)).all() # about .01 sec
	speechs = session.query(func.count(Speech.id)).all() # about .01 sec
	print speakers, votes, divisions, speechs
Example #9
def get_query(camp_idx, *args, group_by=None, area_idx=None):
    '''
    Meta-query for easily pulling per-category statistics.
    '''
    if group_by is None:
        base_query = db.session.query(func.count('*'), func.count(Member.payment), func.sum(Member.attend_yn))
    else:
        base_query = db.session.query(getattr(Member, group_by), func.count('*'), func.count(Member.payment), func.sum(Member.attend_yn))
    base_query = base_query.select_from(Member).outerjoin(Member.payment).filter(Member.camp_idx == camp_idx, Member.cancel_yn == 0)

    if area_idx is not None:
        base_query = base_query.filter(Member.area_idx == area_idx)

    filtered_query = base_query
    for key, value in args:
        if value in ['none', 'not_none']:
            if value == 'none':
                filtered_query = filtered_query.filter(getattr(Member, key).is_(None))
            else:
                filtered_query = filtered_query.filter(getattr(Member, key).isnot(None))
        else:
            filtered_query = filtered_query.filter(getattr(Member, key) == value)

    if group_by is not None:
        filtered_query = filtered_query.group_by(getattr(Member, group_by))

    return filtered_query
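A hedged usage sketch for the meta-query above (the 'gender' and 'memo' column names here are hypothetical): per-group totals for one camp, with extra filters passed as (key, value) pairs:

rows = get_query(3, ('memo', 'not_none'), group_by='gender').all()
# each row: (gender, member_count, payment_count, attend_sum)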
Example #10
def upgrade():
    if not is_sqlite:
        connection = op.get_bind()
        attendees = connection.execute(select([
            attendee_table.c.hotel_pin,
            func.count(attendee_table.c.id),
            func.array_agg(attendee_table.c.id),
        ]).where(and_(
            attendee_table.c.hotel_pin != None,
            attendee_table.c.hotel_pin != '',
        )).group_by(
            attendee_table.c.hotel_pin,
        ).having(
            func.count(attendee_table.c.id) > 1,
        ))
        for hotel_pin, count, ids in attendees:
            hotel_pin_template = '{{:0{}d}}{{}}'.format(len(str(count))) if count > 9 else '{}{}'

            for i, id in enumerate(ids):
                new_hotel_pin = hotel_pin_template.format(i, hotel_pin)
                connection.execute(
                    attendee_table.update().where(attendee_table.c.id == id).values({
                        'hotel_pin': new_hotel_pin,
                    })
                )

    op.create_unique_constraint(op.f('uq_attendee_hotel_pin'), 'attendee', ['hotel_pin'])
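A quick worked check of the pin templating above (values hypothetical): the zero-padded prefix keeps the rewritten pins unique once more than nine attendees share one pin.

count, hotel_pin = 12, 'ABCD'
template = '{{:0{}d}}{{}}'.format(len(str(count))) if count > 9 else '{}{}'
print([template.format(i, hotel_pin) for i in range(3)])
# ['00ABCD', '01ABCD', '02ABCD']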
Example #11
    def index(self, id=None):
        LIMIT = 20

        if not self.authorizer.am_authorized(c, model.Action.USER_READ, model.System):
            abort(401, _('Not authorized to see this page'))

        page = int(request.params.get('page', 1))
        c.q  = request.params.get('q', '')
        c.order_by = request.params.get('order_by', 'name')

        query = model.Session.query(model.User, func.count(model.User.id))
        if c.q:
            query = model.User.search(c.q, query)

        if c.order_by == 'edits':
            query = query.join((model.Revision, or_(
                    model.Revision.author==model.User.name,
                    model.Revision.author==model.User.openid
                    )))
            query = query.group_by(model.User)
            query = query.order_by(desc(func.count(model.User.id)))
        else:
            query = query.group_by(model.User)
            query = query.order_by(model.User.name)

        c.page = h.Page(
            collection=query,
            page=page,
            item_count=query.count(),
            items_per_page=LIMIT
            )
        return render('user/list.html')
Example #12
 def top_tags(cls, limit=10, returned_tag_info='object'): # by package
     assert returned_tag_info in ('name', 'id', 'object')
     tag = table('tag')
     package_tag = table('package_tag')
     package = table('package')
     if returned_tag_info == 'name':
         from_obj = [package_tag.join(tag)]
         tag_column = tag.c.name
     else:
         from_obj = None
         tag_column = package_tag.c.tag_id
     j = join(package_tag, package,
              package_tag.c.package_id == package.c.id)
     s = select([tag_column, func.count(package_tag.c.package_id)],
                 from_obj=from_obj).\
         select_from(j).\
         where(and_(package_tag.c.state=='active', package.c.private == False, package.c.state == 'active' ))
     s = s.group_by(tag_column).\
         order_by(func.count(package_tag.c.package_id).desc()).\
         limit(limit)
     res_col = model.Session.execute(s).fetchall()
     if returned_tag_info in ('id', 'name'):
         return res_col
     elif returned_tag_info == 'object':
         res_tags = [(model.Session.query(model.Tag).get(text_type(tag_id)), val) for tag_id, val in res_col]
         return res_tags
Example #13
    def get_urls(self):
        urls = []
        session = db.create_scoped_session()

        records = (
            session
            .query(Log.dashboard_id, func.count(Log.dashboard_id))
            .filter(and_(
                Log.dashboard_id.isnot(None),
                Log.dttm >= self.since,
            ))
            .group_by(Log.dashboard_id)
            .order_by(func.count(Log.dashboard_id).desc())
            .limit(self.top_n)
            .all()
        )
        dash_ids = [record.dashboard_id for record in records]
        dashboards = (
            session
            .query(Dashboard)
            .filter(Dashboard.id.in_(dash_ids))
            .all()
        )
        for dashboard in dashboards:
            for chart in dashboard.slices:
                urls.append(
                    get_url({'form_data': get_form_data(chart.id, dashboard)}))

        return urls
Example #14
    def test_events(self):
        mapper(User, users, properties={
            'addresses':dynamic_loader(mapper(Address, addresses))
        })
        sess = create_session()
        u1 = User(name='jack')
        a1 = Address(email_address='foo')
        sess.add_all([u1, a1])
        sess.flush()

        eq_(testing.db.scalar(select([func.count(1)]).where(addresses.c.user_id!=None)), 0)
        u1 = sess.query(User).get(u1.id)
        u1.addresses.append(a1)
        sess.flush()

        eq_(testing.db.execute(select([addresses]).where(addresses.c.user_id!=None)).fetchall(),
            [(a1.id, u1.id, 'foo')])

        u1.addresses.remove(a1)
        sess.flush()
        eq_(testing.db.scalar(select([func.count(1)]).where(addresses.c.user_id!=None)), 0)

        u1.addresses.append(a1)
        sess.flush()
        eq_(testing.db.execute(select([addresses]).where(addresses.c.user_id!=None)).fetchall(),
            [(a1.id, u1.id, 'foo')])

        a2 = Address(email_address='bar')
        u1.addresses.remove(a1)
        u1.addresses.append(a2)
        sess.flush()
        eq_(testing.db.execute(select([addresses]).where(addresses.c.user_id!=None)).fetchall(),
            [(a2.id, u1.id, 'bar')])
Example #15
  def counts(self, terms, types=None, contact_id=None,
             extra_params=None, extra_columns=None):
    """Prepare the search query, but return only count for each of
     the requested objects."""
    extra_params = extra_params or {}
    extra_columns = extra_columns or {}
    model_names = self._get_grouped_types(types, extra_params)
    query = db.session.query(
        self.record_type.type, func.count(distinct(
            self.record_type.key)), literal(""))
    query = query.filter(self.get_permissions_query(model_names))
    query = query.filter(self._get_filter_query(terms))
    query = self.search_get_owner_query(query, types, contact_id)
    query = query.group_by(self.record_type.type)
    all_extra_columns = dict(extra_columns.items() +
                             [(p, p) for p in extra_params
                              if p not in extra_columns])
    if not all_extra_columns:
      return query.all()

    # Add extra_params and extra_colums:
    for key, value in all_extra_columns.iteritems():
      extra_q = db.session.query(self.record_type.type,
                                 func.count(distinct(self.record_type.key)),
                                 literal(key))
      extra_q = extra_q.filter(self.get_permissions_query([value]))
      extra_q = extra_q.filter(self._get_filter_query(terms))
      extra_q = self.search_get_owner_query(extra_q, [value], contact_id)
      extra_q = self._add_extra_params_query(extra_q,
                                             value,
                                             extra_params.get(key, None))
      extra_q = extra_q.group_by(self.record_type.type)
      query = query.union(extra_q)
    return query.all()
Example #16
    def prepare(self):
        self.url = url
        hostid_filter = []
        if hasattr(self, 'host_id') and self.host_id is not None:
            hostid_filter = [Attribute.host_id == self.host_id]

        admin_down = DBSession.query(func.count(Attribute.id)).\
            filter(and_(*(
                hostid_filter + [Attribute.admin_state == State.DOWN]
            ))).first()
        self.att_total = int(admin_down[0])
        db_states = DBSession.query(
            EventState.internal_state, func.count(Attribute.id)).\
            join(Attribute).filter(and_(
                *(hostid_filter +
                  [Attribute.admin_state != State.DOWN]))).\
            group_by(EventState.internal_state)
        tmp_states = {}
        for att in db_states:
            tmp_states[att[0]] = att[1]
            self.att_total += att[1]

        self.att_states = []
        for state_val, label in State.NAMES.items():
            if state_val is None:
                self.att_states.append((label, admin_down[0]))
            else:
                try:
                    self.att_states.append((label, tmp_states[state_val]))
                except KeyError:
                    self.att_states.append((label, 0))
        super(AttributeSummary, self).prepare()
Example #17
    def crawler_stats(cls, crawler_id):
        stats = {}
        col = func.count(func.distinct(cls.crawler_run))
        q = db.session.query(col)
        q = q.filter(cls.crawler_id == crawler_id)
        stats['run_count'] = q.scalar()
        last_run_id, last_run_time = cls.crawler_last_run(crawler_id)

        # Check if the crawler was active very recently, if so, don't
        # allow the user to execute a new run right now.
        timeout = (datetime.utcnow() - CrawlerState.TIMEOUT)
        stats['running'] = last_run_time > timeout if last_run_time else False

        q = db.session.query(func.count(func.distinct(cls.foreign_id)))
        q = q.filter(cls.crawler_id == crawler_id)
        for section in ['last', 'all']:
            data = {}
            sq = q
            if section == 'last':
                sq = sq.filter(cls.crawler_run == last_run_id)
            okq = sq.filter(cls.status == cls.STATUS_OK)
            data['ok'] = okq.scalar() if last_run_id else 0
            failq = sq.filter(cls.status == cls.STATUS_FAIL)
            data['fail'] = failq.scalar() if last_run_id else 0
            stats[section] = data
        stats['last']['updated'] = last_run_time
        stats['last']['run_id'] = last_run_id
        return stats
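For reference, the dict built above comes back in this shape (values hypothetical, keys taken from the assignments in the code):

stats = CrawlerState.crawler_stats('my-crawler-id')  # hypothetical id
# {'run_count': 3,
#  'running': False,
#  'last': {'ok': 120, 'fail': 4, 'updated': <datetime>, 'run_id': <id>},
#  'all':  {'ok': 950, 'fail': 31}}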
Example #18
 def test_graceful(self):
     app = self.get_app("guest")
     
     with transaction.manager:
         DBSession.execute('DELETE FROM {};'.format(ExceptionLog.__tablename__))
         DBSession.execute('DELETE FROM {} WHERE "user" = 2;'.format(UserPermissionGroup.__tablename__))
         DBSession.execute("INSERT INTO {} VALUES (2, 'errors');".format(UserPermissionGroup.__tablename__))
         DBSession.execute("COMMIT")
     
     # Graceful
     self.make_request(
         app,
         "/dev/generate_exception?type=graceful",
         allow_graceful = "This is the message accompanying the general exception."
     )
     
     error_count = DBSession.query(func.count(ExceptionLog.id)).first()[0]
     self.assertEqual(error_count, 0,
         msg="There should be no error logs after a graceful exception")
     
     # Graceful with log
     self.make_request(
         app,
         "/dev/generate_exception?type=graceful_with_log",
         allow_graceful = "This is the message accompanying the general exception. Additionally this exception has been logged."
     )
     
     error_count = DBSession.query(func.count(ExceptionLog.id)).first()[0]
     self.assertEqual(error_count, 1,
         msg="There should be 1 error log after the graceful exception with a log")
     
     # Cleanup
     with transaction.manager:
         DBSession.execute('DELETE FROM {} WHERE "user" = 2;'.format(UserPermissionGroup.__tablename__))
         DBSession.execute("COMMIT")
Example #19
def item_search(classes=None, bodygroups=None, equip_regions=None, item_name=None):
    items_query = TF2Item.query.filter_by(inactive=False)
    wildcards = ["%", "_"]
    if item_name:
        if any([w in item_name for w in wildcards]):
            return
        items_query = items_query.filter(TF2Item.item_name.contains(item_name))
    if len(classes) > 0:
        for class_name in classes:
            items_query = items_query.filter(TF2Item.class_model.any(TF2ClassModel.class_name == class_name))
        sq = db.session.query(TF2ClassModel.defindex, func.count(TF2ClassModel.class_name).label("class_count")).group_by(TF2ClassModel.defindex).subquery()
        items_query = items_query.join(sq, TF2Item.defindex == sq.c.defindex)
        if len(classes) == 9:
            pass
        elif len(classes) > 1:
            items_query = items_query.filter(sq.c.class_count > 1).filter(sq.c.class_count < 9)
        elif len(classes) == 1:
            items_query = items_query.filter(sq.c.class_count == 1)
    else:
        return
    if equip_regions:
        items_query = items_query.filter(TF2Item.equip_regions.any(or_(*[TF2EquipRegion.equip_region == equip_region for equip_region in equip_regions])))
    if bodygroups:
        items_query = items_query.filter(TF2Item.bodygroups.any(or_(*[TF2BodyGroup.bodygroup == bodygroup for bodygroup in bodygroups])))
        bodygroup_count = db.session.query(schema_bodygroup.c.defindex, func.count('*').label("bg_count")).group_by(schema_bodygroup.c.defindex).subquery()
        items_query = items_query.join(bodygroup_count, TF2Item.defindex == bodygroup_count.c.defindex).filter(bodygroup_count.c.bg_count == len(bodygroups))
    else:
        items_query = items_query.filter(TF2Item.bodygroups == None)
    return items_query
Example #20
 def get_story(cls, limit: Optional[int] = None,
               offset: Optional[int] = None,
               sort: Optional[str] = None,
               category_id: Optional[int] = None) -> tuple:
     story_qs = Story.sa.query(
         Story.sa.id,
         Story.sa.name,
         Story.sa.slug,
         Story.sa.description,
         Story.sa.content,
         Category.sa.name.label('category_name')
     ).join(
         Category.sa
     )
     if category_id:
         total_count = Story.sa.query(func.count(Story.sa.id)).filter(Category.sa.id == category_id).scalar()
         story_qs = story_qs.filter(
             Category.sa.id == category_id
         )
     else:
         total_count = Story.sa.query(func.count(Story.sa.id)).scalar()
     story_qs = BaseRespository.filter_limit(story_qs, limit)
     story_qs = BaseRespository.filter_offset(story_qs, offset)
     story_qs = BaseRespository.sort_all(story_qs, sort, Story)
     try:
         result = story_qs.all()
     except NoResultFound:
         result = None
     return result, total_count
Example #21
def getTopUsers():
    ''' Return the five most active users by number of places '''
    return meta.Session.query(Place, func.count(Place.user_id)).\
            filter(Place.deleted_on == None). \
            group_by(Place.user_id). \
            order_by(desc(func.count(Place.user_id))). \
            limit(5).all()
Example #22
def render_receipt(user, png=False, pdf=False):
    tickets = (user.tickets
                  .filter_by(paid=True)
                  .join(TicketType)
                  .order_by(TicketType.order))

    entrance_tts_counts = (tickets.filter(TicketType.admits.in_(['full', 'kid']))
        .with_entities(TicketType, func.count(Ticket.id).label('ticket_count'))
        .group_by(TicketType).all())
    entrance_tickets_count = sum(c for tt, c in entrance_tts_counts)

    vehicle_tickets = tickets.filter(TicketType.admits.in_(['car', 'campervan'])).all()

    transferred_tickets = user.transfers_from.order_by('timestamp').all()

    tees = (tickets.filter(TicketType.fixed_id.in_(range(14, 24)))
                  .with_entities(TicketType, func.count(Ticket.id).label('ticket_count'))
                  .group_by(TicketType).all())  # t-shirts

    return render_template('receipt.html', user=user,
                           format_inline_qr=format_inline_qr,
                           format_inline_barcode=format_inline_barcode,
                           entrance_tts_counts=entrance_tts_counts,
                           entrance_tickets_count=entrance_tickets_count,
                           vehicle_tickets=vehicle_tickets,
                           transferred_tickets=transferred_tickets,
                           tees=tees,
                           pdf=pdf, png=png)
Example #23
def recommended_books_by_publisher(books_read_by_user, recommended_books):
    read_books_isbn_list = [book.book_isbn for book in books_read_by_user]
    # Get publishers and the number of books read by the user of the publisher
    # in descending order
    publisher_count = db.session.query(
        func.lower(Books.publisher),
        func.count(Books.publisher)
    ).filter(
        Books.isbn.in_(read_books_isbn_list)
    ).group_by(
        func.lower(Books.publisher)
    ).order_by(
        func.count(Books.publisher).desc()
    ).all()
    publisher_count_dict = {str(publisher): count for (
        publisher, count) in publisher_count}
    unread_books_by_same_publishers = db.session.query(
        Books.isbn,
        func.lower(Books.publisher)
    ).filter(
        and_(
            func.lower(Books.publisher).in_([x[0] for x in publisher_count]),
            ~Books.isbn.in_(read_books_isbn_list)
        )
    ).all()
    # Gets the books of the above publishers which are not read by the user
    for unread_book in unread_books_by_same_publishers:
        isbn = unread_book[0]
        publisher = unread_book[1]
        weight = g.user.publisher_weight * publisher_count_dict[publisher]
        if isbn not in recommended_books:
            recommended_books.update({isbn: weight})
        else:
            recommended_books[isbn] += weight
Example #24
def plot_gender():
    metadata, connection = setup_database()
    tables = ["visits_10min"]
    for table in tables:
        consolidated = return_joined_table(table, metadata)
        print connection.execute(select([func.count()], consolidated.c["demographics_gender"] == 2)).fetchall()
        gender_checkins = []
        for gender in (0, 1):
            gender_checkins.append([])
            for place_label in xrange(1, 11):
                query = select([func.count()], and_(consolidated.c["visits_joined_places_place_label"] == place_label, consolidated.c["demographics_gender"] == gender + 1))
                result = connection.execute(query).fetchall()
                gender_checkins[gender].append(result[0][0])
        fig, ax = plt.subplots()
        width = 0.35
        rects1 = ax.bar(xrange(1, 11), gender_checkins[0], width, color='r')
        rects2 = ax.bar([i + width for i in xrange(1, 11)], gender_checkins[1], width, color='g')
        ax.legend((rects1[0], rects2[0]), ('Men', 'Women'))
        ax.set_ylabel("Count", fontsize = 24, fontweight = 'bold')
        
        ax.set_xlabel("Place Category", fontsize=24, fontweight = 'bold')
        ax.set_title("Visits Across Gender", fontsize=32, fontweight='bold')
        xticks_values = [LABEL_PLACE_MAPPING[i] for i in xrange(1, 11)]
        xticks_values = [textwrap.fill(text,10) for text in xticks_values]
        ax.set_xticks([i + width for i in xrange(1, 11)])
        ax.set_xticklabels(xticks_values)
        #autolabel(rects1, gender_checkins[0])
        #autolabel(rects2, gender_checkins[1])
        plt.show()
Example #25
def cup_list(request):
    season = request.matchdict.get('season', None)
    if season:
        start = datetime.strptime(season.split('-')[0] + '-11-01', '%Y-%m-%d')
        end = datetime.strptime(season.split('-')[1] + '-11-01', '%Y-%m-%d')

    session = DBSession()

    start_date = func.min(CupDate.date).label('start_date')
    end_date = func.max(CupDate.date).label('end_date')

    cups = session.query(Cup, func.count(CupDate.cup_id).label('total'),
                              func.count(CupDate.tournament_id).label('completed'),
                              start_date,
                              end_date) \
           .join(CupDate)

    if season:
        cups = cups \
            .group_by(Cup.id) \
            .having(start_date >= start) \
            .having(start_date <= end)

    cups = cups \
            .group_by(Cup) \
            .order_by('end_date desc', Cup.name.desc()) \
            .all()

    return dict(cups=cups)
Example #26
    def match(self, level, limit=10):
        '''
        Returns a list of UserSkillMatch objects, in descending order of number
        of skills matched for each user.
        '''
        if db.engine.name == 'sqlite':
            agg = func.group_concat
        elif db.engine.name == 'postgresql':
            agg = func.string_agg
        else:
            raise Exception('Unknown aggregation function for DB {}'.format(
                db.engine.name))
        skills_to_learn = [
            s.name for s in
            self.skills if s.level == LEVELS['LEVEL_I_WANT_TO_LEARN']['score']
        ]
        if skills_to_learn:
            matched_users = User.query_in_deployment().\
                            add_column(agg(UserSkill.name, ',')).\
                            add_column(func.count(UserSkill.id)).\
                            filter(UserSkill.name.in_(skills_to_learn)).\
                            filter(User.id == UserSkill.user_id).\
                            filter(UserSkill.level == level).\
                            filter(UserSkill.user_id != self.id).\
                            group_by(User).\
                            order_by(func.count().desc()).\
                            limit(limit)
        else:
            matched_users = []

        for user, question_ids_by_comma, count in matched_users:
            yield UserSkillMatch(user, question_ids_by_comma.split(','))
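The dialect switch above can be sanity-checked without any models by compiling each aggregate against its target dialect (a standalone sketch):

from sqlalchemy import column, func
from sqlalchemy.dialects import postgresql, sqlite

print(func.string_agg(column('name'), ',').compile(dialect=postgresql.dialect()))
# string_agg(name, %(string_agg_1)s)
print(func.group_concat(column('name'), ',').compile(dialect=sqlite.dialect()))
# group_concat(name, ?)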
Example #27
def get_licenses(session, suite=None):
    """ Count files per license filtered by `suite`

    """
    logging.debug('grouped by license summary')
    if not suite:
        q = (session.query(FileCopyright.license, Suite.suite,
                           sql_func.count(FileCopyright.id))
             .join(File)
             .join(Package)
             .join(Suite)
             .group_by(Suite.suite)
             .group_by(FileCopyright.license)
             .order_by(Suite.suite))
        return q.all()
    else:
        q = (session.query(FileCopyright.license,
                           sql_func.count(FileCopyright.id))
             .join(File)
             .join(Package))
        if suite != 'ALL':
            q = q.join(Suite) \
                 .filter(Suite.suite == suite)
        q = q.group_by(FileCopyright.license)
        return dict(q.all())
Example #28
def recommended_books_by_genre(books_read_by_user, recommended_books):
    read_books_isbn_list = [book.book_isbn for book in books_read_by_user]
    # Get the genres, count of genres read by user
    genre_count = db.session.query(
        BooksHasGenre.genre,
        func.count(BooksHasGenre.genre)
    ).filter(
        BooksHasGenre.book_isbn.in_(read_books_isbn_list)
    ).group_by(
        BooksHasGenre.genre
    ).order_by(
        func.count(BooksHasGenre.genre).desc()
    ).all()
    genre_count_dict = {genre: count for (genre, count) in genre_count}
    # Find all the unread books having the same genre
    unread_books_having_same_genre = db.session.query(
        Books.isbn,
        BooksHasGenre.genre
    ).filter(
        Books.isbn == BooksHasGenre.book_isbn,
        BooksHasGenre.genre.in_([x[0] for x in genre_count]),
        ~Books.isbn.in_(read_books_isbn_list)
    ).all()
    for unread_book in unread_books_having_same_genre:
        isbn = unread_book[0]
        genre = unread_book[1]
        weight = g.user.genre_weight * genre_count_dict[genre]
        if isbn not in recommended_books:
            recommended_books.update({isbn: weight})
        else:
            recommended_books[isbn] += weight
Example #29
    def nearest_neighbors(self, limit=10):
        '''
        Returns a list of (user, score) tuples with the closest matching
        skills.  If they haven't answered the equivalent skill question, we
        consider that a very big difference (12).

        Order is closest to least close, which is an ascending score.
        '''
        my_skills = aliased(UserSkill, name='my_skills', adapt_on_names=True)
        their_skills = aliased(UserSkill, name='their_skills', adapt_on_names=True)

        # difference we assume for user that has not answered question
        unanswered_difference = (LEVELS['LEVEL_I_CAN_DO_IT']['score'] -
                                 LEVELS['LEVEL_I_WANT_TO_LEARN']['score']) * 2

        return User.query_in_deployment().\
                add_column(((len(self.skills) - func.count(func.distinct(their_skills.id))) *
                            unanswered_difference) + \
                       func.sum(func.abs(their_skills.level - my_skills.level))).\
                filter(their_skills.user_id != my_skills.user_id).\
                filter(User.id == their_skills.user_id).\
                filter(their_skills.name == my_skills.name).\
                filter(my_skills.user_id == self.id).\
                group_by(User).\
                order_by(((len(self.skills) - func.count(func.distinct(their_skills.id)))
                          * unanswered_difference) + \
                     func.sum(func.abs(their_skills.level - my_skills.level))).\
                limit(limit)
Example #30
 def view_filing(self):
     req = self.request
     ses = req.session
     form = self.get_form(AddSchema)
     page = {}
     rowpage = 1
     cpage = int(req.POST.get('page', 1))
     if cpage < 1:
         cpage = 1
     page['current'] = cpage
     offset = (page['current']-1) * rowpage
     if 'data' in req.POST:
         page['row'] = DBSession.query(func.count(Filing.id)).\
                       filter(or_(Filing.tag.like('%%%s%%' % req.POST['data']),
                          Filing.nama.like('%%%s%%' % req.POST['data'])),).scalar() or 0
                          
         rows = DBSession.query(Filing).\
               filter(or_(Filing.tag.like('%%%s%%' % req.POST['data']),
                          Filing.nama.like('%%%s%%' % req.POST['data'])),).\
                          limit(rowpage).offset(offset)
     else:
         rows = DBSession.query(Filing).\
                 limit(rowpage).offset(offset)
         page['row'] = DBSession.query(func.count(Filing.id)).scalar() or 0
                 
     count = page['row'] // rowpage
     page['count'] = count
     if count < page['row'] / float(rowpage):
         page['count'] = count + 1
                 
     return dict(form=form, rows=rows, page=page)
       
Example #31
def process_sample(db_config, v_germlines, j_germlines, path, meta, props,
                   nproc):
    session = config.init_db(db_config)
    start = time.time()
    logger.info('Starting sample {}'.format(meta['sample_name']))
    sample = setup_sample(session, meta)

    aligner = AnchorAligner(v_germlines, j_germlines)

    # Initial VJ assignment
    alignments = concurrent.process_data(
        read_input,
        process_vdj,
        aggregate_vdj,
        nproc,
        process_args={'aligner': aligner},
        generate_args={'path': path},
    )
    logger.info('Adding noresults')
    for result in alignments['noresult']:
        add_noresults_for_vdj(session, result['vdj'], sample, result['reason'])

    alignments = alignments['success']
    if alignments:
        avg_len = (
            sum([v.v_length for v in alignments]) /
            float(len(alignments)))
        avg_mut = (
            sum([v.v_mutation_fraction for v in alignments]) /
            float(len(alignments))
        )
        sample.v_ties_mutations = avg_mut
        sample.v_ties_len = avg_len
        logger.info('Re-aligning {} sequences to V-ties: Mutations={}, '
                    'Length={}'.format(len(alignments),
                                       round(avg_mut, 2),
                                       round(avg_len, 2)))
        session.commit()
        # Realign to V-ties
        v_ties = concurrent.process_data(
            alignments,
            process_vties,
            aggregate_vties,
            nproc,
            process_args={'aligner': aligner, 'avg_len': avg_len, 'avg_mut':
                          avg_mut, 'props': props},
        )
        logger.info('Adding noresults')

        for result in funcs.periodic_commit(session, v_ties['noresult'], 100):
            add_noresults_for_vdj(session, result['alignment'].sequence,
                                  sample, result['reason'])

        logger.info('Collapsing {} buckets'.format(len(v_ties['success'])))
        session.commit()

        # TODO: Change this so we aren't copying everything between processes
        concurrent.process_data(
            [list(v) for v in v_ties['success']],
            process_collapse,
            aggregate_collapse,
            nproc,
            aggregate_args={'db_config': db_config, 'sample_id': sample.id,
                            'props': props}
        )
        session.expire_all()
        session.commit()

        identified = int(session.query(
            func.sum(Sequence.copy_number)
        ).filter(
            Sequence.sample == sample
        ).scalar() or 0)
        noresults = int(session.query(
            func.count(NoResult.pk)
        ).filter(
            NoResult.sample == sample
        ).scalar() or 0)
        if identified + noresults:
            frac = int(100 * identified / float(identified + noresults))
        else:
            frac = 0
        logger.info(
            'Completed sample {} in {}m - {}/{} ({}%) identified'.format(
                sample.name,
                round((time.time() - start) / 60., 1),
                identified,
                identified + noresults,
                frac
            )
        )
    session.close()
Example #32
def num_chats():
    try:
        return SESSION.query(func.count(distinct(Disable.chat_id))).scalar()
    finally:
        SESSION.close()
Example #33
 def count_rows(self, dbms):
     count_q = dbms.statement.with_only_columns([func.count()
                                                 ]).order_by(None)
     counted = dbms.session.execute(count_q).scalar()
     return counted
Example #34
class Item(db.Model):
    """
    Meant to model a specific item, like an instance of a security group.
    """
    __tablename__ = "item"
    id = Column(Integer, primary_key=True)
    region = Column(String(32), index=True)
    name = Column(
        String(303), index=True
    )  # Max AWS name = 255 chars.  Add 48 chars for ' (sg-12345678901234567 in vpc-12345678901234567)'
    arn = Column(Text(), nullable=True, index=True, unique=True)
    latest_revision_complete_hash = Column(String(32), index=True)
    latest_revision_durable_hash = Column(String(32), index=True)
    tech_id = Column(Integer,
                     ForeignKey("technology.id"),
                     nullable=False,
                     index=True)
    account_id = Column(Integer,
                        ForeignKey("account.id"),
                        nullable=False,
                        index=True)
    latest_revision_id = Column(Integer, nullable=True)
    comments = relationship("ItemComment",
                            backref="revision",
                            cascade="all, delete, delete-orphan",
                            order_by="ItemComment.date_created")
    revisions = relationship("ItemRevision",
                             backref="item",
                             cascade="all, delete, delete-orphan",
                             order_by="desc(ItemRevision.date_created)",
                             lazy="dynamic")
    cloudtrail_entries = relationship("CloudTrailEntry",
                                      backref="item",
                                      cascade="all, delete, delete-orphan",
                                      order_by="CloudTrailEntry.event_time")
    issues = relationship("ItemAudit",
                          backref="item",
                          cascade="all, delete, delete-orphan",
                          foreign_keys="ItemAudit.item_id")
    exceptions = relationship("ExceptionLogs",
                              backref="item",
                              cascade="all, delete, delete-orphan")

    @hybrid_property
    def score(self):
        return db.session.query(func.cast(func.sum(
            ItemAudit.score), Integer)).filter(
                ItemAudit.item_id == self.id, ItemAudit.auditor_setting_id
                == AuditorSettings.id, AuditorSettings.disabled
                == False).one()[0] or 0

    @score.expression
    def score(cls):
        return select([func.sum(ItemAudit.score)]). \
            where(ItemAudit.item_id == cls.id). \
            where(ItemAudit.auditor_setting_id == AuditorSettings.id). \
            where(AuditorSettings.disabled == False). \
            label('item_score')

    @hybrid_property
    def unjustified_score(self):
        return db.session.query(func.cast(func.sum(
            ItemAudit.score), Integer)).filter(
                ItemAudit.item_id == self.id, ItemAudit.justified == False,
                ItemAudit.auditor_setting_id == AuditorSettings.id,
                AuditorSettings.disabled == False).one()[0] or 0

    @unjustified_score.expression
    def unjustified_score(cls):
        return select([func.sum(ItemAudit.score)]). \
            where(ItemAudit.item_id == cls.id). \
            where(ItemAudit.justified == False). \
            where(ItemAudit.auditor_setting_id == AuditorSettings.id). \
            where(AuditorSettings.disabled == False). \
            label('item_unjustified_score')

    issue_count = column_property(select([func.count(
        ItemAudit.id)]).where(ItemAudit.item_id == id).where(
            ItemAudit.auditor_setting_id == AuditorSettings.id).where(
                AuditorSettings.disabled == False),
                                  deferred=True)
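A hedged usage sketch for the hybrid score above: the same attribute works per-instance in Python and as a SQL expression in queries (assuming the models shown):

# Ten highest-scoring items; at class level Item.score compiles to the
# select(...).label('item_score') expression defined above.
riskiest = db.session.query(Item).order_by(Item.score.desc()).limit(10).all()
print(riskiest[0].score)  # instance access runs the Python-side aggregate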
Example #35
    def get_exam_list(self, name_kw=None, page=1, per_page=15):
        self._query = self._model.query
        if name_kw:
            self._query = self._query.filter(self._model.name.like('%' + name_kw + '%'))

        if self.user_id:
            during_exam = self.get_during_exam(self.user_id)
            during_exam = 7  # TODO Just for test.
            exam_paper_msg = {exam_paper.exam_id: exam_paper.finished_time for exam_paper in ExaminationPaper.query.filter_by(user_id=self.user_id).all()}
            if during_exam:
                exam_pagination = self._query.filter(self._model.id.in_(exam_paper_msg.keys())).order_by(
                    case((
                        (self._model.id == during_exam, 1),
                        (self._model.id != during_exam, 2)
                    )),
                    self._model.create_time.desc()).paginate(page, per_page)
            else:
                exam_pagination = self._query.filter(self._model.id.in_(exam_paper_msg.keys())).order_by('-create_time').paginate(page, per_page)
            finished_exams = [ep.exam_id for ep in ExaminationPaper.query.filter_by(user_id=self.user_id).filter(ExaminationPaper.performance!='').all()]
            total, exam_list = exam_pagination.total, [
                {
                    'id': exam.id,
                    'name': exam.name,
                    'create_time': exam.create_time,
                    'question_num': exam.question_num,
                    'finish_time': exam_paper_msg.get(exam.id),
                    'option': self.map_exam_status(exam.id, during_exam, finished_exams, exam.begin_time, exam.end_time)
                } for exam in exam_pagination.items
            ]

        else:
            # self.user_id being None means the current user is a superadmin.
            exam_pagination = self._query.order_by('-create_time').paginate(page, per_page)
            current_page_exam_ids = [exam.id for exam in exam_pagination.items]

            finished_examinees_nums = dict(
                db.session.query(ExaminationPaper.exam_id,
                                 func.count(ExaminationPaper.user_id))
                .filter(ExaminationPaper.exam_id.in_(current_page_exam_ids))
                .filter(ExaminationPaper.performance != '')
                .group_by(ExaminationPaper.exam_id)
                .order_by(func.field(ExaminationPaper.exam_id, *current_page_exam_ids)))
            total, exam_list = exam_pagination.total, [
                {
                    'id': exam.id,
                    'name': exam.name,
                    'question_num': exam.question_num,
                    'create_time': exam.create_time,
                    'finished_examinees_num': finished_examinees_nums.get(exam.id, 0),
                } for _index, exam in enumerate(exam_pagination.items)
            ]
        return total, exam_list
Example #36
 def group_question_set(cls, transaction_type=TransactionType.SHOPPING.value):
     level_question_dict = {}
     level_question_group = db.session.query(cls._model.difficulty, func.group_concat(cls._model.id), func.count(cls._model.id)).filter_by(transaction_type=transaction_type).group_by(cls._model.difficulty)
     for level_question in level_question_group:
         level_question_dict[QuestionDifficulty(level_question[0]).name] = level_question[2]
     return level_question_dict
Example #37
 def split_question_set_for_gen_exam(cls):
     level_question_dict = {}
     level_question_group = db.session.query(cls._model.difficulty, func.group_concat(cls._model.id), func.count(cls._model.id)).group_by(cls._model.difficulty)
     for level_question in level_question_group:
         level_question_dict[level_question[0]] = level_question[1].split(',')
     return level_question_dict
Example #38
def threads(namespace_id, subject, from_addr, to_addr, cc_addr, bcc_addr,
            any_email, thread_public_id, started_before, started_after,
            last_message_before, last_message_after, filename, tag, limit,
            offset, view, db_session):

    if view == 'count':
        query = db_session.query(func.count(Thread.id))
    elif view == 'ids':
        query = db_session.query(Thread.public_id)
    else:
        query = db_session.query(Thread)

    thread_criteria = [Thread.namespace_id == namespace_id]
    if thread_public_id is not None:
        query = query.filter(Thread.public_id == thread_public_id)
        # TODO(emfree): at this point there should be at most one object,
        # so we could just execute the query and check additional filters
        # in Python-land.

    if started_before is not None:
        thread_criteria.append(Thread.subjectdate < started_before)

    if started_after is not None:
        thread_criteria.append(Thread.subjectdate > started_after)

    if last_message_before is not None:
        thread_criteria.append(Thread.recentdate < last_message_before)

    if last_message_after is not None:
        thread_criteria.append(Thread.recentdate > last_message_after)

    if subject is not None:
        thread_criteria.append(Thread.subject == subject)

    thread_predicate = and_(*thread_criteria)
    query = query.filter(thread_predicate)

    if tag is not None:
        tag_query = db_session.query(TagItem).join(Tag). \
            filter(or_(Tag.public_id == tag, Tag.name == tag),
                   Tag.namespace_id == namespace_id).subquery()

        query = query.join(tag_query)

    if any((from_addr, to_addr, cc_addr, bcc_addr)):
        contact_criteria = []
        if from_addr is not None:
            contact_criteria.append(
                and_(Contact.email_address == from_addr,
                     Contact.namespace_id == namespace_id,
                     MessageContactAssociation.field == 'from_addr'))

        if to_addr is not None:
            contact_criteria.append(
                and_(Contact.email_address == to_addr,
                     Contact.namespace_id == namespace_id,
                     MessageContactAssociation.field == 'to_addr'))

        if cc_addr is not None:
            contact_criteria.append(
                and_(Contact.email_address == cc_addr,
                     Contact.namespace_id == namespace_id,
                     MessageContactAssociation.field == 'cc_addr'))

        if bcc_addr is not None:
            contact_criteria.append(
                and_(Contact.email_address == bcc_addr,
                     Contact.namespace_id == namespace_id,
                     MessageContactAssociation.field == 'bcc_addr'))

        contact_query = db_session.query(Message). \
            join(MessageContactAssociation).join(Contact). \
            filter(or_(*contact_criteria)).subquery()

        query = query.join(contact_query)

    if any_email is not None:
        any_contact_query = db_session.query(Message). \
            join(MessageContactAssociation).join(Contact). \
            filter(Contact.email_address == any_email,
                   Contact.namespace_id == namespace_id).subquery()
        query = query.join(any_contact_query)

    if filename is not None:
        files_query = db_session.query(Message). \
            join(Part).join(Block). \
            filter(Block.filename == filename). \
            subquery()
        query = query.join(files_query)

    if view == 'count':
        return {"count": query.one()[0]}

    # Eager-load some objects in order to make constructing API
    # representations faster.
    if view != 'ids':
        query = query.options(
            subqueryload(Thread.messages).load_only('public_id', 'is_draft',
                                                    'from_addr', 'to_addr',
                                                    'cc_addr', 'bcc_addr'),
            subqueryload('tagitems').joinedload('tag').load_only(
                'public_id', 'name'))

    query = query.order_by(desc(Thread.recentdate)).distinct().limit(limit)
    if offset:
        query = query.offset(offset)

    if view == 'ids':
        return [x[0] for x in query.all()]

    return query.all()
Example #39
def _messages_or_drafts(namespace_id, drafts, subject, from_addr, to_addr,
                        cc_addr, bcc_addr, any_email, thread_public_id,
                        started_before, started_after, last_message_before,
                        last_message_after, filename, tag, limit, offset, view,
                        db_session):

    if view == 'count':
        query = db_session.query(func.count(Message.id))
    elif view == 'ids':
        query = db_session.query(Message.public_id)
    else:
        query = db_session.query(Message)

    query = query.filter(Message.namespace_id == namespace_id)

    if drafts:
        query = query.filter(Message.is_draft)
    else:
        query = query.filter(~Message.is_draft)

    has_thread_filtering_criteria = any(
        param is not None
        for param in (thread_public_id, started_before, started_after,
                      last_message_before, last_message_after, tag))

    if has_thread_filtering_criteria:
        thread_criteria = [Thread.namespace_id == namespace_id]
        if thread_public_id is not None:
            # TODO(emfree) this is a common case that we should handle
            # separately by just fetching the thread's messages and only
            # filtering more if needed.
            thread_criteria.append(Thread.public_id == thread_public_id)

        if started_before is not None:
            thread_criteria.append(Thread.subjectdate < started_before)

        if started_after is not None:
            thread_criteria.append(Thread.subjectdate > started_after)

        if last_message_before is not None:
            thread_criteria.append(Thread.recentdate < last_message_before)

        if last_message_after is not None:
            thread_criteria.append(Thread.recentdate > last_message_after)

        thread_predicate = and_(*thread_criteria)
        thread_query = db_session.query(Thread).filter(thread_predicate)
        if tag is not None:
            thread_query = thread_query.join(TagItem).join(Tag). \
                filter(or_(Tag.public_id == tag, Tag.name == tag),
                       Tag.namespace_id == namespace_id)
        thread_query = thread_query.subquery()

        query = query.join(thread_query)

    if subject is not None:
        query = query.filter(Message.subject == subject)

    if to_addr is not None:
        to_query = db_session.query(MessageContactAssociation).join(Contact). \
            filter(MessageContactAssociation.field == 'to_addr',
                   Contact.email_address == to_addr).subquery()
        query = query.join(to_query)

    if from_addr is not None:
        from_query = db_session.query(MessageContactAssociation). \
            join(Contact).filter(
                MessageContactAssociation.field == 'from_addr',
                Contact.email_address == from_addr).subquery()
        query = query.join(from_query)

    if cc_addr is not None:
        cc_query = db_session.query(MessageContactAssociation). \
            join(Contact).filter(
                MessageContactAssociation.field == 'cc_addr',
                Contact.email_address == cc_addr).subquery()
        query = query.join(cc_query)

    if bcc_addr is not None:
        bcc_query = db_session.query(MessageContactAssociation). \
            join(Contact).filter(
                MessageContactAssociation.field == 'bcc_addr',
                Contact.email_address == bcc_addr).subquery()
        query = query.join(bcc_query)

    if any_email is not None:
        any_email_query = db_session.query(
            MessageContactAssociation).join(Contact). \
            filter(Contact.email_address == any_email).subquery()
        query = query.join(any_email_query)

    if filename is not None:
        query = query.join(Part).join(Block). \
            filter(Block.filename == filename,
                   Block.namespace_id == namespace_id)

    query = query.order_by(desc(Message.received_date))

    if view == 'count':
        return {"count": query.one()[0]}

    query = query.distinct().limit(limit)

    if offset:
        query = query.offset(offset)

    if view == 'ids':
        return [x[0] for x in query.all()]

    # Eager-load related attributes to make constructing API representations
    # faster. (Thread.discriminator needed to prevent SQLAlchemy from breaking
    # on resolving inheritance.)
    query = query.options(
        subqueryload(Message.parts).joinedload(Part.block),
        joinedload(Message.thread).load_only('public_id', 'discriminator'))

    return query.all()
Example #40
 def get_count_query(self):
     """
         Return the count query for the model type
     """
     return self.session.query(func.count('*')).select_from(self.model)
Example #41
def get_count(q):
    """Count the number of rows from an SQL query"""
    count_q = q.statement.with_only_columns([func.count()]).order_by(None)
    count = q.session.execute(count_q).scalar()
    return count
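A usage sketch (session and User model assumed): unlike len(q.all()), this issues a single SELECT with the query's criteria, fetching no rows and dropping any ORDER BY:

q = session.query(User).filter(User.name.like('j%'))
total = get_count(q)  # roughly: SELECT count(*) FROM <user table> WHERE name LIKE ...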
Example #42
def events(namespace_id, event_public_id, calendar_public_id, title,
           description, location, starts_before, starts_after, ends_before,
           ends_after, source, limit, offset, view, db_session):

    if view == 'count':
        query = db_session.query(func.count(Event.id))
    elif view == 'ids':
        query = db_session.query(Event.public_id)
    else:
        query = db_session.query(Event)

    query = query.filter(Event.namespace_id == namespace_id)

    event_criteria = []
    if event_public_id:
        query = query.filter(Event.public_id == event_public_id)

    if starts_before is not None:
        event_criteria.append(Event.start < starts_before)

    if starts_after is not None:
        event_criteria.append(Event.start > starts_after)

    if ends_before is not None:
        event_criteria.append(Event.end < ends_before)

    if ends_after is not None:
        event_criteria.append(Event.end > ends_after)

    event_predicate = and_(*event_criteria)
    query = query.filter(event_predicate)

    if calendar_public_id is not None:
        query = query.join(Calendar). \
            filter(Calendar.public_id == calendar_public_id,
                   Calendar.namespace_id == namespace_id)

    if title is not None:
        query = query.filter(Event.title.like('%{}%'.format(title)))

    if description is not None:
        query = query.filter(Event.description.like(
            '%{}%'.format(description)))

    if location is not None:
        query = query.filter(Event.location.like('%{}%'.format(location)))

    if source is not None:
        query = query.filter(Event.source == source)

    if view == 'count':
        return {"count": query.one()[0]}

    query = query.order_by(asc(Event.start)).limit(limit)

    if offset:
        query = query.offset(offset)

    if view == 'ids':
        return [x[0] for x in query.all()]
    else:
        # Eager-load some objects in order to make constructing API
        # representations faster.
        return query.all()
Example #43
0
def num_logchannels():
    try:
        return SESSION.query(func.count(distinct(GroupLogs.chat_id))).scalar()
    finally:
        SESSION.close()
Example #44
0
 def count(cls):
     return session.query(func.count(cls.id)).scalar()
Example #45
0
def events(namespace_id, event_public_id, calendar_public_id, title,
           description, location, busy, starts_before, starts_after,
           ends_before, ends_after, limit, offset, view,
           expand_recurring, show_cancelled, db_session):

    query = db_session.query(Event)

    if not expand_recurring:
        if view == 'count':
            query = db_session.query(func.count(Event.id))
        elif view == 'ids':
            query = db_session.query(Event.public_id)

    filters = [namespace_id, event_public_id, calendar_public_id,
               title, description, location, busy]
    query = filter_event_query(query, Event, *filters)

    event_criteria = []

    if starts_before is not None:
        event_criteria.append(Event.start < starts_before)

    if starts_after is not None:
        event_criteria.append(Event.start > starts_after)

    if ends_before is not None:
        event_criteria.append(Event.end < ends_before)

    if ends_after is not None:
        event_criteria.append(Event.end > ends_after)

    if not show_cancelled:
        if expand_recurring:
            event_criteria.append(Event.status != 'cancelled')
        else:
            # It doesn't make sense to hide cancelled events
            # when we're not expanding recurring events,
            # so don't do it.
            # We still need to show cancelled recurring events
            # for those users who want to do event expansion themselves.
            event_criteria.append(
                (Event.discriminator == 'recurringeventoverride') |
                ((Event.status != 'cancelled') & (Event.discriminator !=
                                                  'recurringeventoverride')))

    event_predicate = and_(*event_criteria)
    query = query.filter(event_predicate)

    if expand_recurring:
        expanded = recurring_events(filters, starts_before, starts_after,
                                    ends_before, ends_after, db_session,
                                    show_cancelled=show_cancelled)

        # Combine non-recurring events with expanded recurring ones
        all_events = query.filter(Event.discriminator == 'event').all() + \
            expanded

        if view == 'count':
            return {"count": len(all_events)}

        all_events = sorted(all_events, key=lambda e: e.start)
        if limit:
            offset = offset or 0
            all_events = all_events[offset:offset + limit]
    else:
        if view == 'count':
            return {"count": query.one()[0]}
        query = query.order_by(asc(Event.start)).limit(limit)
        if offset:
            query = query.offset(offset)
        # Eager-load some objects in order to make constructing API
        # representations faster.
        all_events = query.all()

    if view == 'ids':
        return [x[0] for x in all_events]
    else:
        return all_events
Example #46
0
 def count(self):
     return self.session.query(func.count(Library.id))\
         .filter(Library.is_deleted == false()).scalar()
Example #47
0
def threads(namespace_id, subject, from_addr, to_addr, cc_addr, bcc_addr,
            any_email, thread_public_id, started_before, started_after,
            last_message_before, last_message_after, filename, in_, unread,
            starred, limit, offset, view, db_session):

    if view == 'count':
        query = db_session.query(func.count(Thread.id))
    elif view == 'ids':
        query = db_session.query(Thread.public_id)
    else:
        query = db_session.query(Thread)

    filters = [Thread.namespace_id == namespace_id]
    if thread_public_id is not None:
        filters.append(Thread.public_id == thread_public_id)

    if started_before is not None:
        filters.append(Thread.subjectdate < started_before)

    if started_after is not None:
        filters.append(Thread.subjectdate > started_after)

    if last_message_before is not None:
        filters.append(Thread.recentdate < last_message_before)

    if last_message_after is not None:
        filters.append(Thread.recentdate > last_message_after)

    if subject is not None:
        filters.append(Thread.subject == subject)

    query = query.filter(*filters)

    if from_addr is not None:
        from_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == from_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'from_addr').subquery()
        query = query.filter(Thread.id.in_(from_query))

    if to_addr is not None:
        to_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == to_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'to_addr').subquery()
        query = query.filter(Thread.id.in_(to_query))

    if cc_addr is not None:
        cc_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == cc_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'cc_addr').subquery()
        query = query.filter(Thread.id.in_(cc_query))

    if bcc_addr is not None:
        bcc_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == bcc_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'bcc_addr').subquery()
        query = query.filter(Thread.id.in_(bcc_query))

    if any_email is not None:
        any_contact_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact). \
            filter(Contact.email_address.in_(any_email),
                   Contact.namespace_id == namespace_id).subquery()
        query = query.filter(Thread.id.in_(any_contact_query))

    if filename is not None:
        files_query = db_session.query(Message.thread_id). \
            join(Part).join(Block). \
            filter(Block.filename == filename,
                   Block.namespace_id == namespace_id). \
            subquery()
        query = query.filter(Thread.id.in_(files_query))

    if in_ is not None:
        category_filters = [Category.name == in_, Category.display_name == in_]
        try:
            valid_public_id(in_)
            category_filters.append(Category.public_id == in_)
        except InputError:
            pass
        category_query = db_session.query(Message.thread_id). \
            prefix_with('STRAIGHT_JOIN'). \
            join(Message.messagecategories).join(MessageCategory.category). \
            filter(Category.namespace_id == namespace_id,
                   or_(*category_filters)).subquery()
        query = query.filter(Thread.id.in_(category_query))

    if unread is not None:
        read = not unread
        unread_query = db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_read == read).subquery()
        query = query.filter(Thread.id.in_(unread_query))

    if starred is not None:
        starred_query = db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_starred == starred).subquery()
        query = query.filter(Thread.id.in_(starred_query))

    if view == 'count':
        return {"count": query.one()[0]}

    # Eager-load some objects in order to make constructing API
    # representations faster.
    if view != 'ids':
        expand = (view == 'expanded')
        query = query.options(*Thread.api_loading_options(expand))

    query = query.order_by(desc(Thread.recentdate)).limit(limit)

    if offset:
        query = query.offset(offset)

    if view == 'ids':
        return [x[0] for x in query.all()]

    return query.all()
Example #48
0
try:
    _db = production_session()
    registrar = IdentifierResolutionRegistrar(_db)

    log.info('Finding unresolved identifiers')
    data_source = DataSource.lookup(_db, DataSource.INTERNAL_PROCESSING)
    unresolved_qu = Identifier.missing_coverage_from(
        _db, [], data_source,
        operation=CoverageRecord.RESOLVE_IDENTIFIER_OPERATION,
        count_as_covered=CoverageRecord.SUCCESS
    ).filter(CoverageRecord.id != None)

    log.info('Finding unaffiliated identifiers without a collection')
    unresolved_and_unaffiliated = unresolved_qu.outerjoin(Identifier.collections)\
        .group_by(Identifier.id).having(func.count(Collection.id)==0)\
        .options(lazyload(Identifier.licensed_through)).distinct()

    if unresolved_and_unaffiliated.count() > 0:
        # Use a bulk insert to add them all to the unaffiliated_collection.
        log.info('Giving all unaffiliated identifiers a collection')
        unaffiliated_collection, ignore = MockResolver.unaffiliated_collection(_db)
        _db.execute(
            collections_identifiers.insert(),
            [
                dict(
                    collection_id=unaffiliated_collection.id,
                    identifier_id=identifier.id
                ) for identifier in unresolved_and_unaffiliated
            ]
        )
finally:
    # Assumed cleanup for the otherwise-unclosed try-block: persist the
    # bulk insert and release the session.
    _db.commit()
    _db.close()
Example #49
0
def num_stickers_filter_chats():
    try:
        return SESSION.query(func.count(distinct(StickersFilters.chat_id))).scalar()
    finally:
        SESSION.close()
Example #50
0
def messages_or_drafts(namespace_id, drafts, subject, from_addr, to_addr,
                       cc_addr, bcc_addr, any_email, thread_public_id,
                       started_before, started_after, last_message_before,
                       last_message_after, received_before, received_after,
                       filename, in_, unread, starred, limit, offset, view,
                       db_session):
    # Warning: complexities ahead. This function sets up the query that gets
    # results for the /messages API. It loads from several tables, supports a
    # variety of views and filters, and is performance-critical for the API. As
    # such, it is not super simple.
    #
    # We bake the generated query to avoid paying query compilation overhead on
    # every request. This requires some attention: every parameter that can
    # vary between calls *must* be inserted via bindparam(), or else the first
    # value passed will be baked into the query and reused on each request.
    # Subqueries (on contact tables) can't be properly baked, so we have to
    # call query.spoil() on those code paths.

    param_dict = {
        'namespace_id': namespace_id,
        'drafts': drafts,
        'subject': subject,
        'from_addr': from_addr,
        'to_addr': to_addr,
        'cc_addr': cc_addr,
        'bcc_addr': bcc_addr,
        'any_email': any_email,
        'thread_public_id': thread_public_id,
        'received_before': received_before,
        'received_after': received_after,
        'started_before': started_before,
        'started_after': started_after,
        'last_message_before': last_message_before,
        'last_message_after': last_message_after,
        'filename': filename,
        'in_': in_,
        'unread': unread,
        'starred': starred,
        'limit': limit,
        'offset': offset
    }

    if view == 'count':
        query = bakery(lambda s: s.query(func.count(Message.id)))
    elif view == 'ids':
        query = bakery(lambda s: s.query(Message.public_id))
    else:
        query = bakery(lambda s: s.query(Message))

        # Sometimes MySQL doesn't pick the right index. In the case of a
        # regular /messages query, ix_message_ns_id_is_draft_received_date
        # is the best index because we always filter on
        # the namespace_id, is_draft and then order by received_date.
        # For other "exotic" queries, we let the MySQL query planner
        # pick the right index.
        if all(v is None for v in [subject, from_addr, to_addr, cc_addr,
                                   bcc_addr, any_email, thread_public_id,
                                   filename, in_, started_before,
                                   started_after, last_message_before,
                                   last_message_after]):
            query += lambda q: q.with_hint(
                Message,
                'FORCE INDEX (ix_message_ns_id_is_draft_received_date)',
                'mysql')

    query += lambda q: q.join(Thread, Message.thread_id == Thread.id)
    query += lambda q: q.filter(
        Message.namespace_id == bindparam('namespace_id'),
        Message.is_draft == bindparam('drafts'))

    if subject is not None:
        query += lambda q: q.filter(Message.subject == bindparam('subject'))

    if unread is not None:
        query += lambda q: q.filter(Message.is_read != bindparam('unread'))

    if starred is not None:
        query += lambda q: q.filter(Message.is_starred == bindparam('starred'))

    if thread_public_id is not None:
        query += lambda q: q.filter(
            Thread.public_id == bindparam('thread_public_id'))

    # TODO: deprecate thread-oriented date filters on message endpoints.
    if started_before is not None:
        query += lambda q: q.filter(
            Thread.subjectdate < bindparam('started_before'),
            Thread.namespace_id == bindparam('namespace_id'))

    if started_after is not None:
        query += lambda q: q.filter(
            Thread.subjectdate > bindparam('started_after'),
            Thread.namespace_id == bindparam('namespace_id'))

    if last_message_before is not None:
        query += lambda q: q.filter(
            Thread.recentdate < bindparam('last_message_before'),
            Thread.namespace_id == bindparam('namespace_id'))

    if last_message_after is not None:
        query += lambda q: q.filter(
            Thread.recentdate > bindparam('last_message_after'),
            Thread.namespace_id == bindparam('namespace_id'))

    if received_before is not None:
        query += lambda q: q.filter(
            Message.received_date <= bindparam('received_before'))

    if received_after is not None:
        query += lambda q: q.filter(
            Message.received_date > bindparam('received_after'))

    if to_addr is not None:
        query.spoil()
        to_query = db_session.query(MessageContactAssociation.message_id). \
            join(Contact).filter(
                MessageContactAssociation.field == 'to_addr',
                Contact.email_address == to_addr,
                Contact.namespace_id == bindparam('namespace_id')).subquery()
        query += lambda q: q.filter(Message.id.in_(to_query))

    if from_addr is not None:
        query.spoil()
        from_query = db_session.query(MessageContactAssociation.message_id). \
            join(Contact).filter(
                MessageContactAssociation.field == 'from_addr',
                Contact.email_address == from_addr,
                Contact.namespace_id == bindparam('namespace_id')).subquery()
        query += lambda q: q.filter(Message.id.in_(from_query))

    if cc_addr is not None:
        query.spoil()
        cc_query = db_session.query(MessageContactAssociation.message_id). \
            join(Contact).filter(
                MessageContactAssociation.field == 'cc_addr',
                Contact.email_address == cc_addr,
                Contact.namespace_id == bindparam('namespace_id')).subquery()
        query += lambda q: q.filter(Message.id.in_(cc_query))

    if bcc_addr is not None:
        query.spoil()
        bcc_query = db_session.query(MessageContactAssociation.message_id). \
            join(Contact).filter(
                MessageContactAssociation.field == 'bcc_addr',
                Contact.email_address == bcc_addr,
                Contact.namespace_id == bindparam('namespace_id')).subquery()
        query += lambda q: q.filter(Message.id.in_(bcc_query))

    if any_email is not None:
        query.spoil()
        any_email_query = db_session.query(
            MessageContactAssociation.message_id).join(Contact). \
            filter(Contact.email_address.in_(any_email),
                   Contact.namespace_id == bindparam('namespace_id')). \
            subquery()
        query += lambda q: q.filter(Message.id.in_(any_email_query))

    if filename is not None:
        query += lambda q: q.join(Part).join(Block). \
            filter(Block.filename == bindparam('filename'),
                   Block.namespace_id == bindparam('namespace_id'))

    if in_ is not None:
        query.spoil()
        category_filters = [Category.name == bindparam('in_'),
                            Category.display_name == bindparam('in_')]
        try:
            valid_public_id(in_)
            category_filters.append(Category.public_id == bindparam('in_id'))
            # Type conversion and bindparams interact poorly -- you can't do
            # e.g.
            # query.filter(or_(Category.name == bindparam('in_'),
            #                  Category.public_id == bindparam('in_')))
            # because the binary conversion defined by Category.public_id will
            # be applied to the bound value prior to its insertion in the
            # query. So we define another bindparam for the public_id:
            param_dict['in_id'] = in_
        except InputError:
            pass
        query += lambda q: q.prefix_with('STRAIGHT_JOIN'). \
            join(Message.messagecategories).join(MessageCategory.category). \
            filter(Category.namespace_id == namespace_id,
                   or_(*category_filters))

    if view == 'count':
        res = query(db_session).params(**param_dict).one()[0]
        return {"count": res}

    query += lambda q: q.order_by(desc(Message.received_date))
    query += lambda q: q.limit(bindparam('limit'))
    if offset:
        query += lambda q: q.offset(bindparam('offset'))

    if view == 'ids':
        res = query(db_session).params(**param_dict).all()
        return [x[0] for x in res]

    # Eager-load related attributes to make constructing API representations
    # faster. Note that we don't use the options defined by
    # Message.api_loading_options() here because we already have a join to the
    # thread table. We should eventually try to simplify this.
    query += lambda q: q.options(
        contains_eager(Message.thread),
        subqueryload(Message.messagecategories).joinedload('category'),
        subqueryload(Message.parts).joinedload(Part.block),
        subqueryload(Message.events))

    prepared = query(db_session).params(**param_dict)
    return prepared.all()
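
For reference, here is the baking pattern from the function above in isolation: a minimal sketch assuming SQLAlchemy 1.x's sqlalchemy.ext.baked extension and the Message model. The compiled SQL is cached across calls, so every per-call value has to flow in through bindparam()/params() rather than a closure.

from sqlalchemy import bindparam, desc
from sqlalchemy.ext import baked

bakery = baked.bakery()

def recent_messages(session, namespace_id, limit):
    # Each lambda's compiled form is cached by the bakery.
    q = bakery(lambda s: s.query(Message))
    q += lambda q: q.filter(Message.namespace_id == bindparam('namespace_id'))
    q += lambda q: q.order_by(desc(Message.received_date))
    q += lambda q: q.limit(bindparam('limit'))
    return q(session).params(namespace_id=namespace_id, limit=limit).all()
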
Example #51
0
@event.listens_for(LikedPost, 'after_insert', propagate=True)
def send_post_to_socket(mapper, connection, target):
    target.post.send_to_changes()


@event.listens_for(LikedPost, 'after_update', propagate=True)
def send_post_to_socket_ts(mapper, connection, target):
    # Pass a one-element tuple so the attribute name itself is checked,
    # not its individual characters.
    if not inspect(target).unmodified_intersection(('tombstone_date',)):
        target.db.expire(target.post, ['like_count'])
        target.post.send_to_changes()


_lpt = LikedPost.__table__
_actt = Action.__table__
Content.like_count = column_property(
    select([func.count(_actt.c.id)]).where(
        (_lpt.c.id == _actt.c.id)
        & (_lpt.c.post_id == Content.__table__.c.id)
        & (_actt.c.type == LikedPost.__mapper_args__['polymorphic_identity'])
        & (_actt.c.tombstone_date == None)).correlate_except(_actt, _lpt))


class ExpandPost(UniqueActionOnPost):
    """
    An expansion action on a post.
    """
    __mapper_args__ = {'polymorphic_identity': 'version:ExpandPost_P'}

    verb = 'expanded'

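
Since Content.like_count above is mapped with column_property, it reads and sorts like an ordinary column. A usage sketch, assuming a configured session:

# The correlated count subquery is inlined into the generated SELECT.
top_liked = (session.query(Content)
             .order_by(Content.like_count.desc())
             .limit(10)
             .all())
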
Example #52
0
def _find_duplicates(
    session: Session, table: type[Statistics | StatisticsShortTerm]
) -> tuple[list[int], list[dict]]:
    """Find duplicated statistics."""
    subquery = (
        session.query(
            table.start,
            table.metadata_id,
            literal_column("1").label("is_duplicate"),
        )
        .group_by(table.metadata_id, table.start)
        .having(func.count() > 1)
        .subquery()
    )
    query = (
        session.query(table)
        .outerjoin(
            subquery,
            (subquery.c.metadata_id == table.metadata_id)
            & (subquery.c.start == table.start),
        )
        .filter(subquery.c.is_duplicate == 1)
        .order_by(table.metadata_id, table.start, table.id.desc())
        .limit(1000 * MAX_ROWS_TO_PURGE)
    )
    duplicates = execute(query)
    original_as_dict = {}
    start = None
    metadata_id = None
    duplicate_ids: list[int] = []
    non_identical_duplicates_as_dict: list[dict] = []

    if not duplicates:
        return (duplicate_ids, non_identical_duplicates_as_dict)

    def columns_to_dict(duplicate: Statistics | StatisticsShortTerm) -> dict:
        """Convert a SQLAlchemy row to dict."""
        dict_ = {}
        for key in duplicate.__mapper__.c.keys():
            dict_[key] = getattr(duplicate, key)
        return dict_

    def compare_statistic_rows(row1: dict, row2: dict) -> bool:
        """Compare two statistics rows, ignoring id and created."""
        ignore_keys = ["id", "created"]
        keys1 = set(row1).difference(ignore_keys)
        keys2 = set(row2).difference(ignore_keys)
        return keys1 == keys2 and all(row1[k] == row2[k] for k in keys1)

    for duplicate in duplicates:
        if start != duplicate.start or metadata_id != duplicate.metadata_id:
            original_as_dict = columns_to_dict(duplicate)
            start = duplicate.start
            metadata_id = duplicate.metadata_id
            continue
        duplicate_as_dict = columns_to_dict(duplicate)
        duplicate_ids.append(duplicate.id)
        if not compare_statistic_rows(original_as_dict, duplicate_as_dict):
            non_identical_duplicates_as_dict.append(
                {"duplicate": duplicate_as_dict, "original": original_as_dict}
            )

    return (duplicate_ids, non_identical_duplicates_as_dict)
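
The heart of _find_duplicates() is the GROUP BY ... HAVING func.count() > 1 subquery. The same pattern in isolation, with a hypothetical MyTable and key columns:

# Rows sharing (key_a, key_b) with at least one other row are duplicates.
dup_keys = (
    session.query(MyTable.key_a, MyTable.key_b)
    .group_by(MyTable.key_a, MyTable.key_b)
    .having(func.count() > 1)
    .subquery()
)
duplicate_rows = (
    session.query(MyTable)
    .join(dup_keys, (MyTable.key_a == dup_keys.c.key_a) &
                    (MyTable.key_b == dup_keys.c.key_b))
    .all()
)
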
Example #53
0
    def post(self, task_name):
        participation = self.current_user
        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)

        self.fallback_page = ["tasks", task.name, "submissions"]

        # Alias for easy access
        contest = self.contest

        # Enforce maximum number of submissions
        try:
            if contest.max_submission_number is not None:
                submission_c = self.sql_session\
                    .query(func.count(Submission.id))\
                    .join(Submission.task)\
                    .filter(Task.contest == contest)\
                    .filter(Submission.participation == participation)\
                    .scalar()
                if submission_c >= contest.max_submission_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d submissions among all tasks.") %
                        contest.max_submission_number)
            if task.max_submission_number is not None:
                submission_t = self.sql_session\
                    .query(func.count(Submission.id))\
                    .filter(Submission.task == task)\
                    .filter(Submission.participation == participation)\
                    .scalar()
                if submission_t >= task.max_submission_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d submissions on this task.") %
                        task.max_submission_number)
        except ValueError as error:
            self._send_error(self._("Too many submissions!"), error.message)
            return

        # Enforce minimum time between submissions
        try:
            if contest.min_submission_interval is not None:
                last_submission_c = self.sql_session.query(Submission)\
                    .join(Submission.task)\
                    .filter(Task.contest == contest)\
                    .filter(Submission.participation == participation)\
                    .order_by(Submission.timestamp.desc())\
                    .first()
                if last_submission_c is not None and \
                        self.timestamp - last_submission_c.timestamp < \
                        contest.min_submission_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("Among all tasks, you can submit again "
                               "after %d seconds from last submission.") %
                        contest.min_submission_interval.total_seconds())
            # We get the last submission even if we may not need it for
            # min_submission_interval, because we may need it later in
            # case this is an ALLOW_PARTIAL_SUBMISSION task.
            last_submission_t = self.sql_session.query(Submission)\
                .filter(Submission.task == task)\
                .filter(Submission.participation == participation)\
                .order_by(Submission.timestamp.desc())\
                .first()
            if task.min_submission_interval is not None:
                if last_submission_t is not None and \
                        self.timestamp - last_submission_t.timestamp < \
                        task.min_submission_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("For this task, you can submit again "
                               "after %d seconds from last submission.") %
                        task.min_submission_interval.total_seconds())
        except ValueError as error:
            self._send_error(self._("Submissions too frequent!"),
                             error.message)
            return

        # Required files from the user.
        required = set([sfe.filename for sfe in task.submission_format])

        # Ensure that the user did not submit multiple files with the
        # same name.
        if any(len(filename) != 1 for filename in self.request.files.values()):
            self._send_error(self._("Invalid submission format!"),
                             self._("Please select the correct files."))
            return

        # If the user submitted an archive, extract it and use its contents
        # as request.files. This is only valid for "output only" tasks
        # (i.e., not for submissions requiring a programming language to
        # be identified).
        if len(self.request.files) == 1 and \
                self.request.files.keys()[0] == "submission":
            if any(filename.endswith(".%l") for filename in required):
                self._send_error(self._("Invalid submission format!"),
                                 self._("Please select the correct files."),
                                 task)
                return
            archive_data = self.request.files["submission"][0]
            del self.request.files["submission"]

            # Create the archive.
            archive = Archive.from_raw_data(archive_data["body"])

            if archive is None:
                self._send_error(
                    self._("Invalid archive format!"),
                    self._("The submitted archive could not be opened."))
                return

            # Extract the archive.
            unpacked_dir = archive.unpack()
            for name in archive.namelist():
                filename = os.path.basename(name)
                if filename not in required:
                    continue
                body = open(os.path.join(unpacked_dir, name), "r").read()
                self.request.files[filename] = [{
                    'filename': filename,
                    'body': body
                }]

            archive.cleanup()

        # This ensures that the user sent one file for every name in the
        # submission format and no more. Fewer is acceptable if the task
        # type says so.
        task_type = get_task_type(dataset=task.active_dataset)
        provided = set(self.request.files.keys())
        if not (required == provided or (task_type.ALLOW_PARTIAL_SUBMISSION
                                         and required.issuperset(provided))):
            self._send_error(self._("Invalid submission format!"),
                             self._("Please select the correct files."))
            return

        # Add submitted files. After this, files is a dictionary indexed
        # by *our* filenames (something like "output01.txt" or
        # "taskname.%l") whose values are pairs
        # (user_assigned_filename, content).
        files = {}
        for uploaded, data in self.request.files.iteritems():
            files[uploaded] = (data[0]["filename"], data[0]["body"])

        # Read the submission language provided in the request; we
        # integrate it with the language fetched from the previous
        # submission (if we use it) and later make sure it is
        # recognized and allowed.
        submission_lang = self.get_argument("language", None)
        need_lang = any(
            our_filename.find(".%l") != -1 for our_filename in files)

        # If we allow partial submissions, we implicitly recover the
        # non-submitted files from the previous submission (if it has
        # the same programming language of the current one), and put
        # them in file_digests (since they are already in FS).
        file_digests = {}
        # if task_type.ALLOW_PARTIAL_SUBMISSION and \
        #         last_submission_t is not None and \
        #         (submission_lang is None or
        #          submission_lang == last_submission_t.language):
        #     submission_lang = last_submission_t.language
        #     for filename in required.difference(provided):
        #         if filename in last_submission_t.files:
        #             file_digests[filename] = \
        #                 last_submission_t.files[filename].digest

        # Throw an error if the task needs a language but we don't have
        # one, or it is not allowed / recognized.
        if need_lang:
            error = None
            if submission_lang is None:
                error = self._("Cannot recognize the submission language.")
            elif submission_lang not in contest.languages:
                error = self._("Language %s not allowed in this contest.") \
                    % submission_lang
            if error is not None:
                self._send_error(self._("Invalid submission!"), error)
                return

        # Check if submitted files are small enough.
        if sum([len(f[1])
                for f in files.values()]) > config.max_submission_length:
            self._send_error(
                self._("Submission too big!"),
                self._("Size of each submission must be at most %d bytes.") %
                config.max_submission_length)
            return

        # All checks done, submission accepted.

        # Attempt to store the submission locally to be able to
        # recover a failure.
        if config.submit_local_copy:
            try:
                path = os.path.join(
                    config.submit_local_copy_path.replace(
                        "%s", config.data_dir), participation.user.username)
                if not os.path.exists(path):
                    os.makedirs(path)
                # Pickle in ASCII format produces str, not unicode,
                # therefore we open the file in binary mode.
                with io.open(
                        os.path.join(path,
                                     "%d" % make_timestamp(self.timestamp)),
                        "wb") as file_:
                    pickle.dump((self.contest.id, participation.user.id,
                                 task.id, files), file_)
            except Exception as error:
                logger.warning("Submission local copy failed.", exc_info=True)

        # We now have to send all the files to the destination...
        try:
            for filename in files:
                digest = self.application.service.file_cacher.put_file_content(
                    files[filename][1],
                    "Submission file %s sent by %s at %d." %
                    (filename, participation.user.username,
                     make_timestamp(self.timestamp)))
                file_digests[filename] = digest

        # In case of error, the server aborts the submission
        except Exception as error:
            logger.error("Storage failed! %s", error)
            self._send_error(self._("Submission storage failed!"),
                             self._("Please try again."))
            return

        # All the files are stored, ready to submit!
        logger.info("All files stored for submission sent by %s",
                    participation.user.username)

        # Only set the official bit when the user can compete and we are not in
        # analysis mode.
        official = self.r_params["actual_phase"] == 0

        submission = Submission(self.timestamp,
                                submission_lang,
                                task=task,
                                participation=participation,
                                official=official)

        for filename, digest in file_digests.items():
            self.sql_session.add(File(filename, digest, submission=submission))
        self.sql_session.add(submission)
        self.sql_session.commit()

        # Store some data out of the session so we can close it before issuing
        # RPCs.
        username = participation.user.username
        submission_id = submission.id
        logger.metric("submission_added",
                      submission_id=submission.id,
                      language=submission.language,
                      task_id=task.id,
                      participant_id=participation.id,
                      value=1)

        self.sql_session.close()
        try:
            random_service(self.application.service.evaluation_services)\
                .new_submission(submission_id=submission_id)
        except IndexError:
            logger.error("No evaluation services found. "
                         "Leaving the submission to be "
                         "discovered by sweep. ")
        self.application.service.add_notification(
            username, self.timestamp, self._("Submission received"),
            self._("Your submission has been received "
                   "and is currently being evaluated."), NOTIFICATION_SUCCESS)

        # The argument (encrypted submission id) is not used by CWS
        # (nor does it disclose information to the user), but it is
        # useful for automatic testing to obtain the submission id.
        self.redirect(
            self.contest_url(*self.fallback_page,
                             submission_id=encrypt_number(submission.id)))
Example #54
0
def get_top_likes_albums(session):
    return (session.query(Album, func.count(
        Like.user_id)).join(Like).group_by(Album).order_by(
            desc(func.count(Like.user_id)))[:10])
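
Slicing the query with [:10] emits a LIMIT in the generated SQL; an equivalent, more explicit spelling of the same query:

top_albums = (session.query(Album, func.count(Like.user_id))
              .join(Like)
              .group_by(Album)
              .order_by(desc(func.count(Like.user_id)))
              .limit(10)
              .all())
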
Example #55
0
 def comment_count(self):
     return (select([func.count(
         Comment.id)]).where(Comment.post_id == self.id).where(
             Comment.object_type == 'post').label("comments_count"))
Example #56
0
def list_boards():
    # Query the DB to get the list of boards, along with a count of entries
    # and an average score per board.
    boards_queried = (
        db.session.query(
            Board.id, Board.name, Board.length, Board.volume, Board.shaper,
            Board.display_name, Board.description,
            func.count(Entry.id).label('entry_count'),
            func.avg(Entry.score).label('avg_score'))
        .join(Entry, Board.name == Entry.board)
        .group_by(Board.id, Board.name, Board.length, Board.volume,
                  Board.shaper, Board.display_name, Board.description)
    )

    # Last-surfed dates for the other table at the bottom of the page.
    boards_last_used = db.session.query(
        func.max(Entry.time_out).label('time_out'),
        Entry.board).group_by(Entry.board)

    boards_queried_dict_list = []
    boards_last_used_dict_list = []
    boards_for_template_list = []

    # The SQLAlchemy queries above return keyed tuples, so begin by
    # converting each row to a dictionary.
    for row in boards_queried.all():
        boards_queried_dict_list.append(row._asdict())

    for row in boards_last_used.all():
        boards_last_used_dict_list.append(row._asdict())

    # Strip the HH:MM portion out of each "time_out" value, keeping the date.
    for last_used_row in boards_last_used_dict_list:
        last_used_row['time_out'] = last_used_row['time_out'].date()

    # Merge boards_last_used into boards_queried where the board names match.
    for queried_row in boards_queried_dict_list:
        for last_used_row in boards_last_used_dict_list:
            if queried_row['name'] == last_used_row['board']:
                queried_row.update(last_used_row)
                boards_for_template_list.append(queried_row)

    # Pass the results into the template.
    return render_template("boards.html", rows=boards_for_template_list)
Example #57
0
 def view_user_count(self):
     return (select([func.count(distinct(
         PageView.user_id))]).where(PageView.object_id == self.id).where(
             PageView.object_type == 'post').label("view_user_count"))
Example #58
0
    def get(self, task_name):
        participation = self.current_user

        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)

        if len(task.submission_format) == 0:
            raise tornado.web.HTTPError(404)

        submissions = self.sql_session.query(Submission)\
            .filter(Submission.participation == participation)\
            .filter(Submission.task == task)\
            .options(joinedload(Submission.token))\
            .options(joinedload(Submission.results)) \
            .order_by(Submission.timestamp) \
            .all()

        last_submission_result = None
        for submission in submissions:
            if submission.official:
                sr = submission.get_result(task.active_dataset)
                if sr is not None and sr.scored():
                    last_submission_result = sr

        submissions_left_contest = None
        if self.contest.max_submission_number is not None:
            submissions_c = self.sql_session\
                .query(func.count(Submission.id))\
                .join(Submission.task)\
                .filter(Task.contest == self.contest)\
                .filter(Submission.participation == participation)\
                .scalar()
            submissions_left_contest = \
                self.contest.max_submission_number - submissions_c

        submissions_left_task = None
        if task.max_submission_number is not None:
            submissions_left_task = \
                task.max_submission_number - len(submissions)

        submissions_left = submissions_left_contest
        if submissions_left_task is not None and \
            (submissions_left_contest is None or
             submissions_left_contest > submissions_left_task):
            submissions_left = submissions_left_task

        # Make sure we do not show a negative value if admins changed
        # the maximum.
        if submissions_left is not None:
            submissions_left = max(0, submissions_left)

        self.render("task_submissions.html",
                    task=task,
                    submissions=submissions,
                    submissions_left=submissions_left,
                    submissions_download_allowed=self.contest.
                    submissions_download_allowed,
                    last_submission_result=last_submission_result,
                    **self.r_params)
Example #59
0
 def view_user_count(self):
     return (db.session.query(func.count(distinct(
         PageView.user_id))).filter(PageView.object_id == self.id).filter(
             PageView.object_type == 'post').scalar())
Example #60
0
 def vote_count(self):
     return (select([func.count(Vote.id)
                     ]).where(Vote.object_id == self.id).where(
                         Vote.object_type == 'post').label("vote_count"))