Example #1
    def by_filter(cls, session, opts, **kwargs):
        """
        Get packages from given filters.

        :param session: SQLAlchemy session
        :type session: :class:`sqlalchemy.Session`

        :param opts: filtering options
        :type opts: dict

        :return: package instances
        :rtype: generator of :class:`pyshop.models.Package`
        """
        where = []

        if opts.get('local_only'):
            where.append(cls.local == True)

        if opts.get('names'):
            where.append(cls.name.in_(opts['names']))

        if opts.get('classifiers'):
            ids = [c.id for c in opts.get('classifiers')]
            cls_pkg = classifier__package
            qry = session.query(cls_pkg.c.package_id,
                                func.count('*'))
            qry = qry.filter(cls_pkg.c.classifier_id.in_(ids))
            qry = qry.group_by(cls_pkg.c.package_id)
            qry = qry.having(func.count('*') >= len(ids))
            where.append(cls.id.in_([r[0] for r in qry.all()]))

        return cls.find(session, where=where, **kwargs)
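
A minimal usage sketch for by_filter (hedged: DBSession and the option values below are illustrative assumptions, not part of pyshop's documented API):

# Hypothetical usage; the session factory and option values are assumptions.
session = DBSession()
opts = {
    'local_only': True,                  # only locally hosted packages
    'names': ['pyramid', 'sqlalchemy'],  # restrict by package name
    # 'classifiers': [...],              # objects exposing an .id attribute
}
for package in Package.by_filter(session, opts):
    print(package.name)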
Example #2
    def clientstats(db):
        startdatestring = request.json['startdate']
        enddatestring = request.json['enddate']
        dateformat = '%d-%m-%Y'

        startdate = datetime.strptime(startdatestring, dateformat)
        enddate = datetime.strptime(enddatestring, dateformat)

        selectedRequestsBrowser = db.query(Request.browsername, func.count(Request.browsername)).filter_by(isPageview=True).filter(Request.datetime.between(startdate, enddate)).group_by(Request.browsername)
        selectedRequestsPlatform = db.query(Request.platformname, func.count(Request.platformname)).filter_by(isPageview=True).filter(Request.datetime.between(startdate, enddate)).group_by(Request.platformname)

        returnDict = {'postdata' : request.json, 'returndata' : {'browserstats' : [], 'platformstats' : []}}

        for selectedRequestBrowser in selectedRequestsBrowser:
            tempDict = {'browser' : selectedRequestBrowser[0],
            'pageviews' : selectedRequestBrowser[1]}

            returnDict['returndata']['browserstats'].append(tempDict)

        for selectedRequestPlatform in selectedRequestsPlatform:
            tempDict = {'platform' : selectedRequestPlatform[0],
            'pageviews' : selectedRequestPlatform[1]}

            returnDict['returndata']['platformstats'].append(tempDict)

        return returnDict
Example #3
 def dataset_counts(cls, datasets_q):
     sq = datasets_q.subquery()
     q = select([cls.code, func.count(cls.dataset_id)],
                group_by=cls.code,
                order_by=func.count(cls.dataset_id).desc())
     q = q.where(cls.dataset_id == sq.c.id)
     return db.session.bind.execute(q).fetchall()
Example #4
    def test_migrate_batch_stureg(self):
        batch_guid = '2bb942b9-75cf-4055-a67a-8b9ab53a9dfc'
        batch = {UdlStatsConstants.REC_ID: '6',
                 UdlStatsConstants.BATCH_GUID: batch_guid, UdlStatsConstants.TENANT: self.__tenant,
                 UdlStatsConstants.SCHEMA_NAME: None, Constants.DEACTIVATE: False,
                 UdlStatsConstants.LOAD_TYPE: LoadType.STUDENT_REGISTRATION,
                 UdlStatsConstants.BATCH_OPERATION: 's',
                 UdlStatsConstants.SNAPSHOT_CRITERIA: '{"reg_system_id": "015247bd-058c-48cd-bb4d-f6cffe5b40c1", "academic_year": 2015}'}
        self.insert_into_udl_stats(batch[UdlStatsConstants.REC_ID], batch_guid, self.__tenant, batch[UdlStatsConstants.LOAD_TYPE])

        preprod_conn = EdMigrateSourceConnection(tenant=get_unittest_preprod_tenant_name())
        count_to_source_query = select([func.count()]).select_from(preprod_conn.get_table(Constants.STUDENT_REG))
        count_to_be_inserted = preprod_conn.execute(count_to_source_query).fetchall()[0][0]
        self.assertEqual(10, count_to_be_inserted)

        prod_conn = EdMigrateDestConnection(tenant=get_unittest_preprod_tenant_name())
        student_reg_table = prod_conn.get_table(Constants.STUDENT_REG)
        count_query = select([func.count()]).select_from(student_reg_table)
        count_before = prod_conn.execute(count_query).fetchall()[0][0]
        self.assertEqual(2581, count_before)

        count_snapshot_query = select([func.count()], student_reg_table.c.academic_year == 2015).select_from(student_reg_table)
        count_to_be_deleted = prod_conn.execute(count_snapshot_query).fetchall()[0][0]
        self.assertEqual(1217, count_to_be_deleted)

        rtn = migrate_batch(batch)
        self.assertTrue(rtn)

        expected_count_after = count_before - count_to_be_deleted + count_to_be_inserted
        count_after = prod_conn.execute(count_query).fetchall()[0][0]
        self.assertEqual(expected_count_after, count_after)
Example #5
 def get_community_tags(self, item=None, limit=None):
     """Returns community tags for an item."""
     # Get item-tag association class.
     item_class = item.__class__
     item_tag_assoc_class = self.get_tag_assoc_class(item_class)
     if not item_tag_assoc_class:
         return []
     # Build select statement.
     cols_to_select = [item_tag_assoc_class.table.c.tag_id, func.count('*')]
     from_obj = item_tag_assoc_class.table.join(item_class.table).join(galaxy.model.Tag.table)
     where_clause = (self.get_id_col_in_item_tag_assoc_table(item_class) == item.id)
     order_by = [func.count("*").desc()]
     group_by = item_tag_assoc_class.table.c.tag_id
     # Do query and get result set.
     query = select(columns=cols_to_select,
                    from_obj=from_obj,
                    whereclause=where_clause,
                    group_by=group_by,
                    order_by=order_by,
                    limit=limit)
     result_set = self.sa_session.execute(query)
     # Return community tags.
     community_tags = []
     for row in result_set:
         tag_id = row[0]
         community_tags.append(self.get_tag_by_id(tag_id))
     return community_tags
Example #6
 def dataset_counts(cls, datasets):
     ds_ids = [d.id for d in datasets]
     if not len(ds_ids):
         return []
     q = select([cls.code, func.count(cls.dataset_id)],
                cls.dataset_id.in_(ds_ids), group_by=cls.code,
                order_by=func.count(cls.dataset_id).desc())
     return db.session.bind.execute(q).fetchall()
Example #7
 def top_workouts(self):
     this_client_id = self.id
     counts_time_length_workouts = db.session.query(func.count(Time_Length_Workout.id),Time_Length_Workout.name).group_by(Time_Length_Workout.name).all()
     counts_time_length_workouts = [ (w[0],w[1],'Time_Length_Workout') for w in counts_time_length_workouts]
     counts_rep_set_workouts = db.session.query(func.count(Rep_Set_Workout.id),Rep_Set_Workout.name).group_by(Rep_Set_Workout.name).all()
     counts_rep_set_workouts = [(w[0], w[1], 'Rep_Set_Workout') for w in counts_rep_set_workouts]
     counts_workouts = counts_time_length_workouts + counts_rep_set_workouts
     # sort by count, most frequent first, and keep the top three
     top_workouts = sorted(counts_workouts, key=lambda x: x[0], reverse=True)
     return top_workouts[0:3]
Example #8
 def calculate_score(self, educatives, seminar):
     total_score = 0
     score = self.score
     ken_count = session.query(Educative.hug_id, Educative.ken_id, func.count(Educative.ken_id)).group_by(Educative.hug_id, Educative.ken_id).all()
     for ken in ken_count:
         total_score += (ken[2] ** 2) * score
     second_ken_count = session.query(Educative.hug_id, Educative.second_ken_id, func.count(Educative.second_ken_id)).group_by(Educative.hug_id, Educative.second_ken_id).all()
     for second_ken in second_ken_count:
         total_score += (second_ken[2] ** 2) * score
     return total_score
Example #9
    def PageContent(self):
        results = dict()
        
        storySubselect = self.session.query(func.count(Story.idstory).label('storycount')).group_by(Story.categoryid).add_column(Story.categoryid).subquery()
        charSubselect = self.session.query(func.count(Character.idcharacter).label('charactercount')).group_by(Character.categoryid).add_column(Character.categoryid).subquery()

        query = self.session.query(Category, storySubselect.c.storycount, charSubselect.c.charactercount).order_by(Category.name)
        query = query.join((storySubselect, Category.idcategory == storySubselect.c.categoryid))
        query = query.join((charSubselect, Category.idcategory == charSubselect.c.categoryid))
        
        results['categories'] = [(r[0].name, r[1], r[2], self.request.route_url("search", category=r[0].idcategory)) for r in query]
        numPerRow = 3
        results['numPerRow'] = numPerRow
        results['rows'] = (len(results['categories'])/numPerRow) + 1
        return results
Example #10
def quotas(connection, target):
    allowed = target.owner.org.tier.cron_jobs
    current = connection.scalar(
        select([func.count(distinct(Job.id))]).where(and_(
            Job.owner_id == target.owner_id,
            Job.enabled == True)))  # noqa
    return allowed, current
Example #11
 def test_migrate_fact_asmt_outcome_vw(self):
     preprod_conn = EdMigrateSourceConnection(tenant=get_unittest_preprod_tenant_name())
     prod_conn = EdMigrateDestConnection(tenant=get_unittest_prod_tenant_name())
     batch_guid = "288220EB-3876-41EB-B3A7-F0E6C8BD013B"
     fact_asmt_outcome_table = prod_conn.get_table(Constants.FACT_ASMT_OUTCOME)
     query = select([func.count().label('asmt_outcome_vw_rec_ids')], fact_asmt_outcome_table.c.asmt_outcome_vw_rec_id.in_([1000000776, 1000001034, 1000001112]))
     query_c = query.where(fact_asmt_outcome_table.c.rec_status == 'C')
     query_d = query.where(fact_asmt_outcome_table.c.rec_status == 'D')
     query_I = query.where(fact_asmt_outcome_table.c.rec_status == 'I')
     rset = prod_conn.execute(query_c)
     row = rset.fetchone()
     self.assertEqual(3, row['asmt_outcome_vw_rec_ids'])
     rset.close()
     delete_count, insert_count = migrate_table(batch_guid, None, preprod_conn, prod_conn,
                                                'fact_asmt_outcome_vw', False)
     self.assertEqual(3, delete_count)
     self.assertEqual(3, insert_count)
     rset = prod_conn.execute(query_c)
     row = rset.fetchone()
     self.assertEqual(0, row['asmt_outcome_vw_rec_ids'])
     rset.close()
     rset = prod_conn.execute(query_d)
     row = rset.fetchone()
     self.assertEqual(3, row['asmt_outcome_vw_rec_ids'])
     rset.close()
     # The deactivation count will always be zero in unit tests
     rset = prod_conn.execute(query_I)
     row = rset.fetchone()
     self.assertEqual(0, row['asmt_outcome_vw_rec_ids'])
     rset.close()
Example #12
    def test_migrate_student_reg(self):
        Unittest_with_edcore_sqlite.setUpClass(EdMigrateDestConnection.get_datasource_name(TestMigrate.test_tenant),
                                               use_metadata_from_db=False)
        preprod_conn = EdMigrateSourceConnection(tenant=get_unittest_preprod_tenant_name())
        prod_conn = EdMigrateDestConnection(tenant=get_unittest_prod_tenant_name())
        batch_guid = "0aa942b9-75cf-4055-a67a-8b9ab53a9dfc"
        student_reg_table = preprod_conn.get_table(Constants.STUDENT_REG)
        get_query = select([student_reg_table.c.student_reg_rec_id]).order_by(student_reg_table.c.student_reg_rec_id)
        count_query = select([func.count().label('student_reg_rec_ids')],
                             student_reg_table.c.student_reg_rec_id.in_(range(15541, 15551)))

        rset = preprod_conn.execute(get_query)
        row = rset.fetchall()
        self.assertEqual(10, len(row))
        self.assertListEqual([(15541,), (15542,), (15543,), (15544,), (15545,), (15546,), (15547,), (15548,), (15549,), (15550,)],
                             row)
        rset.close()

        rset = prod_conn.execute(count_query)
        row = rset.fetchone()
        self.assertEqual(0, row['student_reg_rec_ids'])
        rset.close()

        delete_count, insert_count = migrate_table(batch_guid, None, preprod_conn, prod_conn, 'student_reg', False)
        self.assertEqual(0, delete_count)
        self.assertEqual(10, insert_count)

        rset = prod_conn.execute(count_query)
        row = rset.fetchone()
        self.assertEqual(10, row['student_reg_rec_ids'])
        rset.close()
Example #13
 def calculate_score(self, educatives, seminar):
     total_score = 0
     gender_count = session.query(Educative.hug_id, Educative.gender, func.count(Educative.gender)).group_by(Educative.hug_id, Educative.gender).all()
     for hug_gender in gender_count:
         count = hug_gender[2]
         total_score += (count ** 2) * self.score
     return total_score
Example #14
    def bioconcept_interaction_starter():
        nex_session = nex_session_maker()

        id_to_bioentity = dict([(x.id, x) for x in nex_session.query(Locus).all()])
        id_to_bioconcept = dict([(x.id, x) for x in nex_session.query(Bioconcept).all()])

        bad_interactors = set([x.id for x in nex_session.query(Bioconcept).filter(Bioconcept.format_name.in_({'vegetative_growth',
                                                                                                                'haploinsufficient',
                                                                                                                'viable',
                                                                                                                'heat_sensitivity',
                                                                                                                'toxin_resistance',
                                                                                                                'chronological_lifespan',
                                                                                                                'competitive_fitness',
                                                                                                                'desiccation_resistance',
                                                                                                                'resistance_to_cycloheximide',
                                                                                                                'resistance_to_methyl_methanesulfonate',
                                                                                                                'resistance_to_sirolimus',
                                                                                                                'vacuolar_morphology',
                                                                                                                'inviable'})).all()])

        #Go
        for row in nex_session.query(Goevidence.locus_id, Goevidence.go_id, func.count(Goevidence.id)).filter(Goevidence.annotation_type != 'computational').group_by(Goevidence.locus_id, Goevidence.go_id).all():
            go = id_to_bioconcept[row[1]]
            locus = id_to_bioentity[row[0]]
            if go.go_aspect == 'biological process' and go.id not in bad_interactors:
                yield {'interaction_type': 'GO', 'evidence_count': row[2], 'bioentity': locus, 'interactor': go}

        #Phenotype
        for row in nex_session.query(Phenotypeevidence.locus_id, Phenotypeevidence.phenotype_id, func.count(Phenotypeevidence.id)).group_by(Phenotypeevidence.locus_id, Phenotypeevidence.phenotype_id).all():
            observable = id_to_bioconcept[row[1]].observable
            locus = id_to_bioentity[row[0]]
            if observable.id not in bad_interactors:
                yield {'interaction_type': 'PHENOTYPE', 'evidence_count': row[2], 'bioentity': locus, 'interactor': observable}

        nex_session.close()
Example #15
    def build_query(cls, session, join=None, where=None, order_by=None,
                    limit=None, offset=None, count=None):

        if count is not None:
            query = session.query(func.count(count)).select_from(cls)
        else:
            query = session.query(cls)

        if join:
            if isinstance(join, (list, tuple)):
                for j in join:
                    query = query.join(j)
            else:
                query = query.join(join)

        if where:
            for filter in where:
                query = query.filter(filter)

        if order_by is not None:
            if isinstance(order_by, (list, tuple)):
                query = query.order_by(*order_by)
            else:
                query = query.order_by(order_by)
        if limit:
            query = query.limit(limit)
        if offset:
            query = query.offset(offset)
        return query
Example #16
    def get_sub_entries_count(self, identity = None, exclude_identity = None):
        """
Returns the number of child entries of this instance.

:param identity: Count only DataLinker children of the given identity
:param exclude_identity: Count only DataLinker children not of the given identity

:return: (int) Number of child entries
:since:  v0.2.00
        """

        if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}.get_sub_entries_count()- (#echo(__LINE__)#)", self, context = "pas_datalinker")

        if (identity is None and exclude_identity is None): _return = self.get_data_attributes("sub_entries")['sub_entries']
        elif (identity is not None and exclude_identity is not None): raise ValueException("Defining both an identity and to exclude an identity is not supported")
        else:
            with self:
                db_query = self.local.db_instance.rel_children.with_entities(sql.count(_DbDataLinker.id))

                if (identity is not None): db_query = db_query.filter(_DbDataLinker.identity == identity)
                elif (exclude_identity is not None): db_query = db_query.filter(_DbDataLinker.identity != exclude_identity)

                db_query = DataLinker._db_apply_id_site_condition(db_query)

                _return = db_query.scalar()
            #
        #

        return _return
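
A hedged usage sketch for get_sub_entries_count (the entry instance and the identity value are illustrative assumptions, not documented pas_datalinker values):

# Illustrative only; "entry" is assumed to be a DataLinker instance and the
# identity string is a placeholder.
total_children = entry.get_sub_entries_count()
filtered_children = entry.get_sub_entries_count(identity="menu_entry")
other_children = entry.get_sub_entries_count(exclude_identity="menu_entry")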
Example #17
def get_searches_ordered_by_label_matches(
        labels,
        searcher_role,
        searching_for_role,
        offset_number=0,
        max_number=10):

    if offset_number > 0:
        offset_number *= max_number

    labelnames = [label.name.lower() for label in labels]
    query = store.session.query(Search, func.count(Label.id).label('matches'))
    query = query.join(Search.labels)
    query = query.filter(func.lower(Label.name).in_(labelnames))
    query = query.filter(Search.active==True)
    query = query.filter(Search.searcher_role==searcher_role)
    query = query.filter(Search.searching_for_role==searching_for_role)
    query = query.join(Search.searcher_user)
    query = query.filter(User.email_confirmed==True)
    query = query.filter(User.active==True)
    query = query.group_by(Search.id)
    query = query.order_by('matches DESC')
    searches_and_count = query.offset(offset_number).limit(max_number)
    searches = [sc[0] for sc in searches_and_count]
    return searches
Example #18
def get_number_solved_subquery():
    """
    Get a subquery that returns how many teams have solved a challenge.

    Example usage:

        .. code-block:: python

            number_of_solved_subquery = get_number_solved_subquery()
            challenge_query = (DBSession.query(Challenge,
                                               number_of_solved_subquery))

    Here we query for a list of all challenges and additionally fetch the
    number of times it has been solved. This subquery will use the outer
    challenge to correlate on, so make sure to provide one or this query
    makes no sense.
    """
    from fluxscoreboard.models import dynamic_challenges
    query = (DBSession.query(func.count('*')).
             filter(Challenge.id == Submission.challenge_id).
             correlate(Challenge).as_scalar())
    for name, module in dynamic_challenges.registry.items():
        dyn_cnt = module.solved_count_query().filter(Challenge.module == name)
        query = query + dyn_cnt.as_scalar()
    return query.label("solved_count")
Example #19
    def get_staging_demographic_counts(self):
        demographics = ['hispanicorlatinoethnicity', 'americanindianoralaskanative', 'asian', 'blackorafricanamerican',
                        'nativehawaiianorotherpacificislander', 'white', 'demographicracetwoormoreraces',
                        'ideaindicator', 'lepstatus', 'section504status', 'economicdisadvantagestatus',
                        'migrantstatus']
        results_dict = {}
        with get_udl_connection() as conn:
            stg_outcome = conn.get_table('stg_sbac_asmt_outcome')
            for entry in demographics:
                query = select([func.count(stg_outcome.c[entry])], from_obj=stg_outcome).where(stg_outcome.c[entry].in_(['Y', 'y', 'yes']))
                result = conn.execute(query)
                for row in result:
                    demo_count = row[0]

                results_dict[entry] = demo_count

        correlated_results = {
            'dmg_eth_hsp': results_dict['hispanicorlatinoethnicity'],
            'dmg_eth_ami': results_dict['americanindianoralaskanative'],
            'dmg_eth_asn': results_dict['asian'],
            'dmg_eth_blk': results_dict['blackorafricanamerican'],
            'dmg_eth_pcf': results_dict['nativehawaiianorotherpacificislander'],
            'dmg_eth_wht': results_dict['white'],
            'dmg_eth_2om': results_dict['demographicracetwoormoreraces'],
            'dmg_prg_iep': results_dict['ideaindicator'],
            'dmg_prg_lep': results_dict['lepstatus'],
            'dmg_prg_504': results_dict['section504status'],
            'dmg_sts_ecd': results_dict['economicdisadvantagestatus'],
            'dmg_sts_mig': results_dict['migrantstatus'],
        }

        return correlated_results
Example #20
 def rank(self):
     inner_team = aliased(Team)
     return (DBSession.query(func.count('*') + 1).
             select_from(inner_team).
             filter(inner_team.score > Team.score).
             correlate(Team).
             label('rank'))
Example #21
 def get_stats(cls):
     return dict(
         DBSession.query(Provider.pk, func.count(cls.ref_pk).label('c'))
         .filter(Provider.pk == cls.provider_pk)
         .group_by(Provider.pk)
         .order_by(desc('c'))
         .all())
Example #22
def is_activated_by_callfilter_id(session, callfilter_id):
    return (session.query(func.count(Callfiltermember.active))
            .join((Callfilter, Callfilter.id == Callfiltermember.callfilterid))
            .filter(and_(Callfiltermember.callfilterid == callfilter_id,
                         Callfiltermember.bstype == 'secretary',
                         Callfiltermember.active == 1))
            .first()[0])
Example #23
def get_extract_assessment_item_and_raw_count_query(params, extract_type):
    """
    private method to generate SQLAlchemy object or sql code for extraction of
    students for item level/raw data

    :param params: query parameters: asmt_year, asmt_type, asmt_subject,
    asmt_grade
    :param extract_type: Type of extraction: Item Level, Raw Data Level,
    Student Assessment
    """
    state_code = params.get(Constants.STATECODE)

    with EdCoreDBConnection(state_code=state_code) as connector:
        fact_asmt_outcome_vw = connector.get_table(
            Constants.FACT_ASMT_OUTCOME_VW)
        query = select_with_context([
            func.count().label(Constants.COUNT)],
            from_obj=[fact_asmt_outcome_vw],
            permission=get_required_permission(extract_type),
            state_code=state_code)

        query = _assessment_item_and_raw_where_clause_builder(
            query, fact_asmt_outcome_vw, params)

    return query
Example #24
def quotas(connection, target):
    allowed = target.org.tier.nodes
    current = connection.scalar(
        select([func.count(distinct(Node.id))]).where(
            Node.org_id == target.org.id).where(Node.enabled == True))  # noqa

    return allowed, current
Example #25
 def count(self):
     """ Get a count of the number of distinct objects. """
     q = select(from_obj=self.join(self.alias))
     q = self.filter(q, partial=True)
     q = q.column(func.count(func.distinct(self.alias.c.id)).label('num'))
     rp = db.session.execute(q)
     return rp.fetchone().num
Example #26
    def store_item(self, item, spider): # {{{
        item_table = self.table

        dbobj = {}
        for k,v in item.iteritems():
            if isinstance(v, list) or isinstance(v, dict):
                dbobj[k] = json.dumps(v)
            else:
                dbobj[k] = v

        conn = self.engine.connect()

        page_url = item['page_url']
        where = item_table.c.page_url == page_url
        sel = select([func.count(item_table.c.id)]).where(where)
        cnt = conn.execute(sel).scalar()
        if cnt:
            assert cnt==1, 'More than one item with page_url %s' % page_url
            upd = item_table.update().where(where)
            conn.execute(upd, dbobj)
            status = 'updated'
        else:
            ins = item_table.insert()
            conn.execute(ins, dbobj)
            status = 'inserted'
        log.msg('Item %s into %s: %s' % (status, item_table.name, page_url), level=log.DEBUG, spider=spider)

        conn.close()
Example #27
 def get_number_comments(self, status=None):
     if not status:
         return Session.query(sa.func.count(Comment.id)).filter_by(change_id=self.id).first()[0]
     
     
     date = Session.query(func.max(CommentStatus.created_date).label('date'), Comment.id)
     date = date.filter(CommentStatus.comment_id==Comment.id).filter(Comment.change_id==self.id)
     date = date.group_by(CommentStatus.comment_id, Comment.id)
     subq = date.subquery()
     
     q = Session.query(func.count(Comment.id)).outerjoin((subq, subq.c.id==Comment.id))
     q = q.outerjoin((CommentStatus, CommentStatus.comment_id==Comment.id))
     q = q.filter(Comment.change_id==self.id).filter(Comment.status!=STATUS_REMOVED)
     q = q.filter(Comment.in_reply_to_id==None)
     
     if status == STATUS_OPEN:
         q = q.filter(sa.or_(
             CommentStatus.id==None,
             sa.and_(CommentStatus.created_date==subq.columns.date, CommentStatus.status==status)
         ))
         return q.scalar()
     else:
         q = q.filter(
             sa.and_(CommentStatus.created_date==subq.columns.date, CommentStatus.status==status)
         )
         return q.scalar()
Example #28
def pivot(query, column_expressions, column_labels):
    query = query.group_by(query.statement)
    columns = []
    for i in range(0, len(column_expressions)):
        column_expressions[i] = func.count(case([(column_expressions[i], 1)]))
        columns.append(column_expressions[i].label(column_labels[i]))
    return query.add_columns(*columns)
Example #29
 def num_entries(self, conditions="1=1"):
     """ Return the count of entries on the dataset fact table having the
     dimension set to a value matching the filter given by ``conditions``.
     """
     query = select([func.count(func.distinct(self.column_alias))],
                    conditions)
     rp = self.dataset.bind.execute(query)
     return rp.fetchone()[0]
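
A hedged call sketch for num_entries (the dimension instance and the condition string are illustrative; the raw-string where clause mirrors the older SQLAlchemy API used above):

# Illustrative only; "dimension" and the column name are assumptions.
distinct_count = dimension.num_entries("time_year = '2011'")
all_count = dimension.num_entries()  # defaults to the tautology "1=1"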
Example #30
    def by_filter(cls, session, opts, **kwargs):
        where = []

        if opts.get('local_only'):
            where.append(cls.local==True)

        if opts.get('classifiers'):
            ids = [c.id for c in opts.get('classifiers')]
            cls_pkg = classifier__package
            qry = session.query(cls_pkg.c.package_id,
                                func.count('*'))
            qry = qry.filter(cls_pkg.c.classifier_id.in_(ids))
            qry = qry.group_by(cls_pkg.c.package_id)
            qry = qry.having(func.count('*') >= len(ids))
            where.append(cls.id.in_([r[0] for r in qry.all()]))

        return cls.find(session, where=where, **kwargs)
Example #31
def childnodes(request):
    if request.params.get('t') == 'select2':
        query = DBSession.query(Languoid.id, Languoid.name, Languoid.level)\
            .filter(icontains(Languoid.name, request.params.get('q')))
        total = query.count()
        ms = LanguoidsMultiSelect(request, None, None, url='x')
        return dict(
            results=[ms.format_result(l) for l in query.limit(100)],
            context={},
            more=total > 500)

    query = DBSession.query(
        Languoid.pk,
        Languoid.id,
        Languoid.name,
        Languoid.level,
        func.count(TreeClosureTable.child_pk).label('children'))\
        .filter(Language.pk == TreeClosureTable.parent_pk)\
        .filter(Language.active == true())

    if request.params.get('node'):
        query = query.filter(Languoid.father_pk == int(request.params['node']))
    else:
        # narrow down selection of top-level nodes in the tree:
        query = query.filter(Languoid.father_pk == null())
        if request.params.get('q'):
            query = query.filter(Language.name.contains(request.params.get('q')))

    query = query.group_by(
        Languoid.pk,
        Languoid.id,
        Languoid.name,
        Languoid.level).order_by(Language.name)
    return [{
        'label': ('%s (%s)' % (l.name, l.children - 1))
            if l.children > 1 else l.name,
        'glottocode': l.id,
        'lname': l.name,
        'id': l.pk,
        'level': l.level.value,
        #'children': l.children
        'load_on_demand': l.children > 1} for l in query]
Example #32
 def do_collector(self, input: str):
     try:
         arguments = self._process_input(KisConsoleConsoleCommand.collector,
                                         input)
         if len(arguments) == 0:
             with self._engine.session_scope() as session:
                 workspace_id = session.query(Workspace.id).filter_by(
                     name=self._workspace).scalar_subquery()
                 query = session.query(CollectorName.name.label("collector"),
                                       CollectorName.type.label("type"),
                                       func.coalesce(Command.status, CommandStatus.pending.name).label("status"),
                                       CollectorName.priority,
                                       func.count(Command.status).label("count")) \
                     .outerjoin((Command, CollectorName.commands)) \
                     .filter(Command.workspace_id == workspace_id) \
                     .group_by(CollectorName.name,
                               CollectorName.type,
                               func.coalesce(Command.status, CommandStatus.pending.name),
                               CollectorName.priority)
                 df = pandas.read_sql(query.statement, query.session.bind)
                 df["status"] = df["status"].apply(
                     lambda x: CommandStatus(x).name)
                 df["type"] = df["type"].apply(
                     lambda x: CollectorType(x).name)
                 results = pandas.pivot_table(
                     df,
                     index=["collector", "type", "priority"],
                     columns=["status"],
                     values="count",
                     aggfunc=numpy.sum,
                     fill_value=0).sort_values(by="priority")
                 print(results)
         elif arguments[0] == CollectorArgumentEnum.current:
             if self._producer_thread.current_collector:
                 print(self._producer_thread.current_collector.name)
             else:
                 print("none")
         elif arguments[0] == CollectorArgumentEnum.remaining:
             for item in self._producer_thread.remaining_collectors:
                 print(item)
     except Exception:
         traceback.print_exc(file=sys.stderr)
Example #33
def index():
    if current_user.is_authenticated:
        page = request.args.get('page', 1, type=int)
        per_page = current_app.config['ALBUMY_PHOTO_PER_PAGE']
        pagination = Photo.query \
            .join(Follow, Follow.followed_id == Photo.author_id) \
            .filter(Follow.follower_id == current_user.id) \
            .order_by(Photo.timestamp.desc()) \
            .paginate(page, per_page)
        photos = pagination.items
    else:
        pagination = None
        photos = None
    tags = Tag.query.join(Tag.photos).group_by(Tag.id).order_by(
        func.count(Photo.id).desc()).limit(10)
    return render_template('main/index.html',
                           pagination=pagination,
                           photos=photos,
                           tags=tags,
                           Collect=Collect)
Example #34
def feed_ratingindex():
    off = request.args.get("offset") or 0
    entries = calibre_db.session.query(db.Ratings, func.count('books_ratings_link.book').label('count'),
                               (db.Ratings.rating / 2).label('name')) \
        .join(db.books_ratings_link)\
        .join(db.Books)\
        .filter(calibre_db.common_filters()) \
        .group_by(text('books_ratings_link.rating'))\
        .order_by(db.Ratings.rating).all()

    pagination = Pagination(
        (int(off) / (int(config.config_books_per_page)) + 1),
        config.config_books_per_page, len(entries))
    element = list()
    for entry in entries:
        element.append(FeedObject(entry[0].id, "{} Stars".format(entry.name)))
    return render_xml_template('feed.xml',
                               listelements=element,
                               folder='opds.feed_ratings',
                               pagination=pagination)
Example #35
def get_skills(min_frequency, max_frequency):
    count = func.count()

    filters = []
    if min_frequency:
        filters.append(count >= min_frequency)
    if max_frequency:
        filters.append(count <= max_frequency)

    skill_counts = db.session.query(Skill.name, count)\
                             .group_by(Skill.name)\
                             .having(and_(*filters))\
                             .all()

    res = []
    for skill in skill_counts:
        skill_json = {"name": skill[0], "frequency": skill[1]}
        res.append(skill_json)

    return res
Example #36
def seed_subscriptions():
    subscriptions = db.session.query(func.count(
        UserSubscription.following_id)).scalar()
    subscriptions_to_seed = 30
    user_ids = [user[0] for user in db.session.query(User.id).all()]
    for i in range(subscriptions, subscriptions_to_seed):
        follower_id = random.choice(user_ids)
        already_following_ids = [
            u[0] for u in UserSubscription.query.filter(
                UserSubscription.follower_id == follower_id).with_entities(
                    UserSubscription.following_id)
        ]
        already_following_ids.append(follower_id)
        user_ids_not_following = set(user_ids).difference(
            set(already_following_ids))
        following_id = random.sample(user_ids_not_following, 1)[0]
        db.session.add(
            UserSubscription(following_id=following_id,
                             follower_id=follower_id))
    db.session.commit()
Example #37
 def build_sub_query(self, extra_columns=[], where_guid=None):
     '''
     build select columns based on request
     '''
     query = select(extra_columns + [self._fact_asmt_outcome_vw.c.asmt_subject.label(Constants.ASMT_SUBJECT),
                                     self._fact_asmt_outcome_vw.c.inst_hier_rec_id,
                                     func.count().label(Constants.TOTAL),
                                     self._fact_asmt_outcome_vw.c.asmt_perf_lvl.label(Constants.LEVEL)])\
         .where(and_(self._fact_asmt_outcome_vw.c.state_code == self._state_code,
                     self._fact_asmt_outcome_vw.c.asmt_type == self._asmt_type,
                     self._fact_asmt_outcome_vw.c.rec_status == Constants.CURRENT,
                     or_(self._fact_asmt_outcome_vw.c.administration_condition == Constants.ADMINISTRATION_CONDITION_STANDARDIZED, self._fact_asmt_outcome_vw.c.administration_condition == null()),
                     self._fact_asmt_outcome_vw.c.asmt_year == self._asmt_year))\
         .group_by(self._fact_asmt_outcome_vw.c.asmt_subject,
                   self._fact_asmt_outcome_vw.c.inst_hier_rec_id,
                   self._fact_asmt_outcome_vw.c.asmt_perf_lvl)
     if where_guid is not None:
         query = query.where(and_(where_guid))
     return apply_filter_to_query(query, self._fact_asmt_outcome_vw,
                                  self._dim_student, self._filters)
Example #38
def dashboard_def():

    names = set()
    for numes in db.sqlalchemy_session.query(Message.MessageSender).distinct():
        names.add(numes.MessageSender)
        print("type names", numes.MessageSender)
    names_converted = tuple(names)

    values = []
    for i in names:
        q = db.sqlalchemy_session.query(func.count(
            Message.MessageSender)).filter(Message.MessageSender == i).one()
        list_of_max = list(q)
        new_index = list_of_max[0]
        values.append(new_index)

    bar = go.Bar(
        x=names_converted,
        y=values,
    )
    scatter = go.Scatter(
        x=names_converted,
        y=values,
    )

    # messages = db.sqlalchemy_session.query(Message).order_by(Message.MessageContent)
    # calories = [dish.calories_amount for dish in messages]
    # bar = go.Bar(
    #     x=calories,
    #     y=[dish.dishname for dish in messages]
    # )

    # scatter = go.Scatter(
    #     x=calories,
    #     y=[dish.dishname for dish in messages],
    # )
    ids = [0, 1]
    data = [scatter, bar]
    graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)

    return render_template('dashboard.html', graphJSON=graphJSON, ids=ids)
Example #39
def seed_users():
    role, created = get_or_create(
        db.session,
        Role,
        defaults={'description': 'for standard users'},
        name='ROLE_USER')
    db.session.commit()
    non_standard_user_ids = db.session.query(User.id) \
        .filter(~User.roles.any(id=role.id)).all()

    all_users_count = db.session.query(func.count(User.id)).all()[0][0]
    all_users_count = db.session.query(User.id).count()

    # User.query.filter(User.roles.any(UserRole.role_id.in_([1,2]))).count()
    standard_users_count = db.session.query(User).filter(
        User.roles.any(UserRole.role_id.in_([role.id]))).count()
    standard_users_count = db.session.query(User.id).filter(
        User.roles.any(id=role.id)).count()

    users_to_seed = 23
    users_to_seed -= standard_users_count
    sys.stdout.write('[+] Seeding %d users\n' % users_to_seed)

    for i in range(0, users_to_seed):
        profile = fake.profile(fields='username,mail,name')
        username = profile['username']
        # fake.first_name() fake.first_name_male() fake.first_name_female(), same for last_name()
        first_name = profile['name'].split()[0]
        last_name = profile['name'].split()[1]
        email = profile['mail']
        password = bcrypt.generate_password_hash('password')
        user = User(username=username,
                    first_name=first_name,
                    last_name=last_name,
                    email=email,
                    password=password)
        user.roles.append(role)
        db.session.add(user)
        db.session.commit()

    db.session.commit()
Example #40
    def search(self, query, **kwargs):
        """
        Searches in the configured media directories given a query. It uses the
        built-in SQLite index if available. If any directory has changed since
        the last scan then it will be indexed again and the up-to-date results
        will be returned.
        """

        session = self._get_db_session()
        results = {}

        for media_dir in self.dirs:
            self.logger.info('Searching {} for "{}"'.format(media_dir, query))
            dir_record = self._get_or_create_dir_entry(session, media_dir)

            if self._has_directory_changed_since_last_indexing(dir_record):
                self.logger.info(
                    '{} has changed since last indexing, '.format(media_dir) +
                    're-indexing')

                self.scan(media_dir, session=session, dir_record=dir_record)

            query_tokens = [
                _.lower()
                for _ in re.split(self._filename_separators, query.strip())
            ]

            for file_record in session.query(MediaFile.path). \
                    join(MediaFileToken). \
                    join(MediaToken). \
                    filter(MediaToken.token.in_(query_tokens)). \
                    group_by(MediaFile.path). \
                    having(func.count(MediaFileToken.token_id) >= len(query_tokens)):
                if os.path.isfile(file_record.path):
                    results[file_record.path] = {
                        'url': 'file://' + file_record.path,
                        'title': os.path.basename(file_record.path),
                        'size': os.path.getsize(file_record.path)
                    }

        return results.values()
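
The core of this search is the group_by/having pattern: a path only survives the HAVING clause if it matched at least as many tokens as were requested, i.e. every token is present. A reduced sketch of just that pattern, reusing the models above (the session argument is assumed):

# Sketch only; assumes an existing SQLAlchemy session and the models used above.
def paths_matching_all_tokens(session, query_tokens):
    rows = (session.query(MediaFile.path)
            .join(MediaFileToken)
            .join(MediaToken)
            .filter(MediaToken.token.in_(query_tokens))
            .group_by(MediaFile.path)
            # keep a path only if every requested token matched it
            .having(func.count(MediaFileToken.token_id) >= len(query_tokens)))
    return [row.path for row in rows]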
Example #41
def seed_orders():
    orders_count = db.session.query(func.count(Order.id)).scalar()
    orders_to_seed = 31
    addresses = db.session.query(Address).options(load_only('id',
                                                            'user_id')).all()
    products = db.session.query(Product).options(
        load_only('id', 'name', 'slug', 'price')).all()

    for i in range(orders_count, orders_to_seed):
        address = random.choice(addresses)
        tracking_number = fake.uuid4()
        order_status = fake.random_int(min=0, max=2)
        user_id = address.user_id
        order = Order(tracking_number=tracking_number,
                      order_status=order_status,
                      address_id=address.id,
                      user_id=user_id)

        db.session.add(order)
        '''
        Flush now so the order gets an id that the order items can reference.
        The alternative is to skip flush(), drop order_id=order.id and
        session.add(oi), and instead call order.order_items.append(oi)
        (see below); both approaches give the same result.
        '''

        db.session.flush()

        for i in range(fake.random_int(min=1, max=6)):
            product = random.choice(products)
            oi = OrderItem(name=product.name,
                           slug=product.slug,
                           price=product.price,
                           order_id=order.id,
                           product_id=product.id,
                           user_id=user_id,
                           quantity=fake.random_int(min=1, max=5))
            db.session.add(oi)

            # order.order_items.append(oi)

        db.session.commit()
Example #42
def store_fits_header(connection, galaxy_id, group):
    """
    Store the fits header data for a galaxy in the HDF5 file
    """
    LOG.info('Storing the fits headers')
    count = connection.execute(
        select([func.count(FITS_HEADER.c.fitsheader_id)
                ]).where(FITS_HEADER.c.galaxy_id == galaxy_id)).first()[0]
    data = numpy.zeros(count, dtype=data_type_fits_header1_01)
    count = 0
    for fits_header in connection.execute(
            select([FITS_HEADER
                    ]).where(FITS_HEADER.c.galaxy_id == galaxy_id).order_by(
                        FITS_HEADER.c.fitsheader_id)):
        data[count] = (
            fits_header[FITS_HEADER.c.keyword],
            fits_header[FITS_HEADER.c.value],
            fits_header[FITS_HEADER.c.comment],
        )
        count += 1
    group.create_dataset('fits_header', data=data, compression='gzip')
Example #43
def count_star(session: Union[Session, Engine, Connection], tablename: str,
               *criteria: Any) -> int:
    """
    Returns the result of ``COUNT(*)`` from the specified table (with
    additional ``WHERE`` criteria if desired).

    Args:
        session: SQLAlchemy :class:`Session`, :class:`Engine`, or
            :class:`Connection` object
        tablename: name of the table
        criteria: optional SQLAlchemy "where" criteria

    Returns:
        a scalar
    """
    # works if you pass a connection or a session or an engine; all have
    # the execute() method
    query = select([func.count()]).select_from(table(tablename))
    for criterion in criteria:
        query = query.where(criterion)
    return session.execute(query).scalar()
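
A hedged usage sketch for count_star (the engine URL, table, and column names are illustrative assumptions):

# Illustrative only; the "patient" table and "age" column are assumed to exist.
from sqlalchemy import column, create_engine

engine = create_engine("sqlite:///example.db")
n_total = count_star(engine, "patient")
n_adults = count_star(engine, "patient", column("age") >= 18)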
Example #44
def store_area(connection, galaxy_id, group):
    """
    Store the areas associated with a galaxy
    """
    LOG.info('Storing the areas')
    count = connection.execute(select([func.count(AREA.c.area_id)]).where(AREA.c.galaxy_id == galaxy_id)).first()[0]
    data = numpy.zeros(count, dtype=data_type_area)
    count = 0
    for area in connection.execute(select([AREA]).where(AREA.c.galaxy_id == galaxy_id).order_by(AREA.c.area_id)):
        data[count] = (
            area[AREA.c.area_id],
            area[AREA.c.top_x],
            area[AREA.c.top_y],
            area[AREA.c.bottom_x],
            area[AREA.c.bottom_y],
            area[AREA.c.workunit_id] if area[AREA.c.workunit_id] is not None else -1,
            str(area[AREA.c.update_time]),
            )
        count += 1
    group.create_dataset('area', data=data, compression='gzip')
    return count
Example #45
def index():
	"""
	Home page
	"""
	logger.info('url = ' + str(request.url))
	logger.info('Is the current user logged in: ' + str(current_user.is_authenticated))
	# The home page differs for logged-in and anonymous users:
	# a logged-in user sees photos posted by themselves and the users they follow, newest first,
	# while an anonymous user sees the landing page and the registration option.
	if current_user.is_authenticated:
		# page number
		page = request.args.get("page", 1, type=int)
		# number of photos per page
		per_page = current_app.config["ALBUMY_PHOTO_PER_PAGE"]
		pagination = (
			# join the Follow table on Follow.followed_id == Photo.author_id
			Photo.query.join(Follow, Follow.followed_id == Photo.author_id)
				# keep only photos from users the current user follows
				.filter(Follow.follower_id == current_user.id)
				# sort by timestamp, newest first
				.order_by(Photo.timestamp.desc()).paginate(page, per_page)
		)
		# get the photos for this page
		photos = pagination.items
	else:
		pagination = None
		photos = None
	tags = (
		Tag.query.join(Tag.photos)
			.group_by(Tag.id)
			.order_by(func.count(Photo.id).desc())
			.limit(10)
	)
	return render_template(
		"main/index.html",
		pagination=pagination,
		photos=photos,
		tags=tags,
		Collect=Collect,
	)
Example #46
    def test_migrate_student_reg(self):
        Unittest_with_edcore_sqlite.setUpClass(
            EdMigrateDestConnection.get_datasource_name(
                TestMigrate.test_tenant),
            use_metadata_from_db=False)
        preprod_conn = EdMigrateSourceConnection(
            tenant=get_unittest_preprod_tenant_name())
        prod_conn = EdMigrateDestConnection(
            tenant=get_unittest_prod_tenant_name())
        batch_guid = "0aa942b9-75cf-4055-a67a-8b9ab53a9dfc"
        student_reg_table = preprod_conn.get_table(Constants.STUDENT_REG)
        get_query = select([student_reg_table.c.student_reg_rec_id
                            ]).order_by(student_reg_table.c.student_reg_rec_id)
        count_query = select([func.count().label('student_reg_rec_ids')],
                             student_reg_table.c.student_reg_rec_id.in_(
                                 range(15541, 15551)))

        rset = preprod_conn.execute(get_query)
        row = rset.fetchall()
        self.assertEqual(10, len(row))
        self.assertListEqual([(15541, ), (15542, ), (15543, ), (15544, ),
                              (15545, ), (15546, ), (15547, ), (15548, ),
                              (15549, ), (15550, )], row)
        rset.close()

        rset = prod_conn.execute(count_query)
        row = rset.fetchone()
        self.assertEqual(0, row['student_reg_rec_ids'])
        rset.close()

        delete_count, insert_count = migrate_table(batch_guid, None,
                                                   preprod_conn, prod_conn,
                                                   'student_reg', False)
        self.assertEqual(0, delete_count)
        self.assertEqual(10, insert_count)

        rset = prod_conn.execute(count_query)
        row = rset.fetchone()
        self.assertEqual(10, row['student_reg_rec_ids'])
        rset.close()
Example #47
def store_area_user(connection, galaxy_id, group):
    """
    Store the areas associated with a galaxy
    """
    LOG.info('Storing the area_users')
    count = connection.execute(
        select([func.count(AREA_USER.c.areauser_id)],
               from_obj=AREA_USER.join(AREA)).where(
                   AREA.c.galaxy_id == galaxy_id)).first()[0]
    data = numpy.zeros(count, dtype=data_type_area_user)
    count = 0
    for area_user in connection.execute(
            select([AREA_USER], from_obj=AREA_USER.join(AREA)).where(
                AREA.c.galaxy_id == galaxy_id).order_by(
                    AREA_USER.c.areauser_id)):
        data[count] = (
            area_user[AREA_USER.c.area_id],
            area_user[AREA_USER.c.userid],
            str(area_user[AREA_USER.c.create_time]),
        )
        count += 1
    group.create_dataset('area_user', data=data, compression='gzip')
Example #48
 def insert_feed(cls, source_id, feed_articles):
     insert = Article.__table__.insert().prefix_with('IGNORE')
     article_list = []
     for position in feed_articles:
         distress = filter_dir.binary_filter.classify_bert([position['title']])
         article_list.append({
             'title': position['title'],
             'body': position['summary'],
             'link': position['link'],
             'guid': position['id'],
             'distress': int(distress),
             'source_id': source_id,
             'date_published': position['published'],
             'img_link': position['img_link'],
             'img_credit': position['img_credit'],
             'tags': position['tags']
         })
     db.engine.execute(insert, article_list)
     count = db.session.query(func.count(Article.title)).scalar()
     if count > 100:
         # remove the oldest article so the table stays bounded
         oldest = db.session.query(Article).order_by(Article.date_added.asc()).first()
         if oldest is not None:
             db.session.delete(oldest)
         db.session.commit()
Example #49
 def post(self):
     request_data = request.data
     normal_user_id = request_data["userId"]
     try: 
         purchased_gifts = db.session.query(User_Gift.gift_id,func.count(User_Gift.gift_id)).filter(and_(User_Gift.user_id==normal_user_id,User_Gift.status=="active")).group_by(User_Gift.gift_id).all()
         print(purchased_gifts)
         
         giftList=[]
         for purchased_gift in purchased_gifts:
             giftInfo={}
             giftInfo["giftId"]=purchased_gift[0]
             gift=Merchant_Gift.query.get(purchased_gift[0])
             giftInfo["giftName"]=gift.gift_name
             giftInfo["amount"]=gift.amount
             giftInfo["count"]=purchased_gift[1]
             giftList.append(giftInfo)
         print(giftList)
         message = "Success"
         return self.response("200","false",giftList,message)
     except Exception as err:
         message = str(err)
         return self.response("503", "true",{}, message)
Example #50
def stats_prometheus_route():
    """returns internal stats; prometheus"""

    stats = {}

    stats['sner_storage_hosts_total'] = Host.query.count()
    stats['sner_storage_services_total'] = Service.query.count()
    stats['sner_storage_vulns_total'] = Vuln.query.count()
    stats['sner_storage_notes_total'] = Note.query.count()

    stale_horizont = datetime.utcnow() - timedelta(days=5)
    stats['sner_scheduler_jobs_total{{state="running"}}'] = Job.query.filter(Job.retval == None, Job.time_start > stale_horizont).count()  # noqa: E501,E711  pylint: disable=singleton-comparison
    stats['sner_scheduler_jobs_total{{state="stale"}}'] = Job.query.filter(Job.retval == None, Job.time_start < stale_horizont).count()  # noqa: E501,E711  pylint: disable=singleton-comparison
    stats['sner_scheduler_jobs_total{{state="finished"}}'] = Job.query.filter(Job.retval == 0).count()
    stats['sner_scheduler_jobs_total{{state="failed"}}'] = Job.query.filter(Job.retval != 0).count()

    queue_targets = db.session.query(Queue.name, func.count(Target.id).label('cnt')).select_from(Queue).outerjoin(Target).group_by(Queue.name).all()
    for queue, targets in queue_targets:
        stats[f'sner_scheduler_queue_targets_total{{name="{queue}"}}'] = targets

    output = '\n'.join(f'{key} {val}' for key, val in stats.items())
    return Response(output, mimetype='text/plain')
Example #51
def get_fqdn_hash_range(db):
    hash_counts = (db.query(
        db_models.Frontier.fqdn_hash_fetcher_index,
        func.count(db_models.Frontier.fqdn),
    ).group_by(db_models.Frontier.fqdn_hash_fetcher_index).order_by(
        db_models.Frontier.fqdn_hash_fetcher_index).all())

    hash_values = [x[1] for x in hash_counts]

    if hash_values:
        count = len(hash_values)
        min_value = min(hash_values)
        max_value = max(hash_values)
        avg_value = sum(hash_values) / count

        hash_range = max_value - min_value
        perc_range = (hash_range / 2) / avg_value

    else:
        perc_range = 0.0

    return round(perc_range, 2)
Example #52
def list():
    now_time = datetime.datetime.now()
    day_now = now_time.strftime("%Y-%m-%d")
    day_before = (now_time - datetime.timedelta(days=1)).strftime("%Y-%m-%d")

    username_set = set()
    username_set.add('11')
    username_set.add('22')
    user_list = User.query.filter(func.find_in_set(User.username,
                                                   username_set)).all()

    User.session.query(func.count('*').label('count'))\
        .group_by(func.DATE_FORMAT(User.update_time, '%Y/%m'))

    current_page = 1
    page_size = 10
    ids = [1, 2, 3]
    User.query.filter(and_(User.id.in_(ids)), User.name.in_(ids))\
        .outerjoin(WxInfo, User.id == WxInfo.id).add_entity(WxInfo)\
        .group_by(User.id) \
        .add_column(func.group_concat(User.name)) \
        .paginate(current_page, page_size)
Example #53
def main() -> None:
    BATCH_SIZE = 50000
    total_count = 0
    with ScopedSession() as session:
        offset = 0
        db_comments_count = session.query(func.count(
            Comment.comment_id)).scalar()
        parsed_comments = set()
        while True:
            print(f'Offset: {offset}')
            q = session.query(Comment).order_by(
                Comment.comment_id).limit(BATCH_SIZE).offset(offset)
            for comment in q:
                comment.text = normalize_text(comment.text)
                parsed_comments.add(comment.comment_id)
                total_count += 1
            offset += BATCH_SIZE
            if offset > db_comments_count:
                break
    print(
        f'Total_count: {total_count}, db_count: {db_comments_count}, parsed_comments_len: {len(parsed_comments)}'
    )
Example #54
def home():
    def join(query):
        return query.options( \
         joinedload(Package.license), \
         joinedload(Package.media_license))

    query = Package.query.filter_by(state=PackageState.APPROVED)
    count = query.count()

    new = join(query.order_by(db.desc(Package.approved_at))).limit(8).all()
    pop_mod = join(
        query.filter_by(type=PackageType.MOD).order_by(db.desc(
            Package.score))).limit(8).all()
    pop_gam = join(
        query.filter_by(type=PackageType.GAME).order_by(db.desc(
            Package.score))).limit(4).all()
    pop_txp = join(
        query.filter_by(type=PackageType.TXP).order_by(db.desc(
            Package.score))).limit(4).all()

    updated = db.session.query(Package).select_from(PackageRelease).join(Package) \
      .filter_by(state=PackageState.APPROVED) \
      .order_by(db.desc(PackageRelease.releaseDate)) \
      .limit(20).all()
    updated = updated[:8]

    reviews = PackageReview.query.filter_by(recommends=True).order_by(
        db.desc(PackageReview.created_at)).limit(5).all()

    downloads_result = db.session.query(func.sum(
        Package.downloads)).one_or_none()
    downloads = 0 if not downloads_result or not downloads_result[
        0] else downloads_result[0]

    tags = db.session.query(func.count(Tags.c.tag_id), Tag) \
     .select_from(Tag).outerjoin(Tags).group_by(Tag.id).order_by(db.asc(Tag.title)).all()

    return render_template("index.html", count=count, downloads=downloads, tags=tags, \
      new=new, updated=updated, pop_mod=pop_mod, pop_txp=pop_txp, pop_gam=pop_gam, reviews=reviews)
Example #55
    def speaking_language(self, languages=None, return_all_languages=False, with_count=False, reverse_order=False):
        from . import get_locale

        if with_count:
            if not languages:
                languages = self.session.query(Languages, func.count('books_languages_link.book'))\
                    .join(books_languages_link).join(Books)\
                    .filter(self.common_filters(return_all_languages=return_all_languages)) \
                    .group_by(text('books_languages_link.lang_code')).all()
            for lang in languages:
                lang[0].name = isoLanguages.get_language_name(get_locale(), lang[0].lang_code)
            return sorted(languages, key=lambda x: x[0].name, reverse=reverse_order)
        else:
            if not languages:
                languages = self.session.query(Languages) \
                    .join(books_languages_link) \
                    .join(Books) \
                    .filter(self.common_filters(return_all_languages=return_all_languages)) \
                    .group_by(text('books_languages_link.lang_code')).all()
            for lang in languages:
                lang.name = isoLanguages.get_language_name(get_locale(), lang.lang_code)
            return sorted(languages, key=lambda x: x.name, reverse=reverse_order)
Example #56
0
def count_star_and_max(session: Union[Session, Engine, Connection],
                       tablename: str, maxfield: str,
                       *criteria: Any) -> Tuple[int, Optional[int]]:
    """

    Args:
        session: SQLAlchemy :class:`Session`, :class:`Engine`, or
            :class:`Connection` object
        tablename: name of the table
        maxfield: name of column (field) to take the ``MAX()`` of
        criteria: optional SQLAlchemy "where" criteria

    Returns:
        a tuple: ``(count, maximum)``

    """
    query = select([func.count(),
                    func.max(column(maxfield))]).select_from(table(tablename))
    for criterion in criteria:
        query = query.where(criterion)
    result = session.execute(query)
    return result.fetchone()  # count, maximum
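
Since the docstring above already spells out the arguments, a short self-contained usage sketch may help: it runs count_star_and_max against a throwaway in-memory SQLite database. The note table and its columns are invented for illustration, and the example assumes a SQLAlchemy version that still accepts the 1.x-style select([...]) used by the function.

from sqlalchemy import column, create_engine, text

engine = create_engine('sqlite://')  # throwaway in-memory database
with engine.begin() as conn:
    conn.execute(text('CREATE TABLE note (id INTEGER PRIMARY KEY, score INTEGER)'))
    conn.execute(text('INSERT INTO note (score) VALUES (3), (7), (5)'))

with engine.connect() as conn:
    n_rows, max_score = count_star_and_max(conn, 'note', 'score',
                                           column('score') > 2)
    print(n_rows, max_score)  # -> 3 7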
Example #57
0
    def get_staging_demographic_counts(self):
        demographics = [
            'hispanicorlatinoethnicity', 'americanindianoralaskanative',
            'asian', 'blackorafricanamerican',
            'nativehawaiianorotherpacificislander', 'white',
            'demographicracetwoormoreraces', 'ideaindicator', 'lepstatus',
            'section504status', 'economicdisadvantagestatus', 'migrantstatus'
        ]
        results_dict = {}
        with get_udl_connection() as conn:
            stg_outcome = conn.get_table('stg_sbac_asmt_outcome')
            for entry in demographics:
                query = select([func.count(stg_outcome.c[entry])],
                               from_obj=stg_outcome).where(
                                   stg_outcome.c[entry].in_(['Y', 'y', 'yes']))
                result = conn.execute(query)
                for row in result:
                    demo_count = row[0]

                results_dict[entry] = demo_count

        correlated_results = {
            'dmg_eth_hsp': results_dict['hispanicorlatinoethnicity'],
            'dmg_eth_ami': results_dict['americanindianoralaskanative'],
            'dmg_eth_asn': results_dict['asian'],
            'dmg_eth_blk': results_dict['blackorafricanamerican'],
            'dmg_eth_pcf':
            results_dict['nativehawaiianorotherpacificislander'],
            'dmg_eth_wht': results_dict['white'],
            'dmg_eth_2om': results_dict['demographicracetwoormoreraces'],
            'dmg_prg_iep': results_dict['ideaindicator'],
            'dmg_prg_lep': results_dict['lepstatus'],
            'dmg_prg_504': results_dict['section504status'],
            'dmg_sts_ecd': results_dict['economicdisadvantagestatus'],
            'dmg_sts_mig': results_dict['migrantstatus'],
        }

        return correlated_results
Example #58
0
def get_data_range(session, device_id, date_from, date_to, resolution):
    row_count = session.query(func.count(DefaultDeviceData.id)).filter(
        DefaultDeviceData.date > date_from,
        DefaultDeviceData.date < date_to).filter_by(
            device_id=device_id).scalar()
    data_list = []
    # More rows than the requested resolution: downsample by block-averaging.
    if row_count > resolution:
        all_data = session.query(DefaultDeviceData).filter(
            DefaultDeviceData.date > date_from,
            DefaultDeviceData.date < date_to).filter_by(
                device_id=device_id).all()

        skip_rows = floor(row_count / resolution)
        counter = 0
        data_sum = 0
        for item in all_data:
            counter += 1
            data_sum += item.temp
            if counter == skip_rows:
                # Emit one representative row per block, carrying the averaged temperature.
                avg_item = item
                avg_item.temp = round(data_sum / counter, 2)
                data_list.append(avg_item)

                counter = 0
                data_sum = 0
    else:
        data_list = session.query(DefaultDeviceData).filter(
            DefaultDeviceData.date > date_from,
            DefaultDeviceData.date < date_to).filter_by(
                device_id=device_id).all()

    data_object_list = []
    for data in data_list:
        data_object = DeviceDataView(data.id, 1 if data.led_state else 0,
                                     str(data.temp), str(data.date))
        data_object_list.append(data_object)

    return jsonify({'data': [result.serialize for result in data_object_list]})
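
One caveat worth noting about the averaging branch above: avg_item is the same ORM instance returned by the query, so assigning avg_item.temp marks it dirty, and the averaged value could be written back to the database if the surrounding session ever commits. A minimal way to guard against that, assuming a standard SQLAlchemy Session, is to detach the row before mutating it:

            if counter == skip_rows:
                session.expunge(item)  # detach so the in-memory average is never flushed
                item.temp = round(data_sum / counter, 2)
                data_list.append(item)
                counter = 0
                data_sum = 0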
Example #59
0
    def show(self, fullname, crew):

        self.view['fullname'] = fullname

        sql = """
        select id from movie
        where meta is not null
              and btrim(json_select(meta, '{"%s"}')::text, '"') like :fullname """

        if crew == 'director':
            sql = sql % 'Director'

        elif crew == 'screenwriter':
            sql = sql % 'Writer'

        elif crew == 'actor':
            sql = sql % 'Actors'

        else:
            raise RuntimeError('routing for crew:%s not provided' % crew)

        # Map non-ASCII characters to SQL wildcards for the LIKE comparison.
        fullname_ = fullname.encode('ascii', 'replace').decode('ascii').replace('?', '%')
        movie_ids = self.session.execute(sql, {'fullname': '%' + fullname_ + '%'}).fetchall()
        movie_ids = [row[0] for row in movie_ids]

        sq = self.session.query(Rating.movie_id,
                                func.avg(Rating.rating).label('avg_rating'),
                                func.count(Rating.user_id).label('rev_cnt'))\
                 .group_by(Rating.movie_id).subquery()

        movies = self.session.query(Movie, sq.c.avg_rating, sq.c.rev_cnt)\
                    .outerjoin((sq, sq.c.movie_id==Movie.id))\
                    .filter(Movie.id.in_(movie_ids))\
                    .order_by(Movie.year.desc()).all()

        self.view.update({'crew': crew,
                          'movies': movies})
        self.template = 'cast.phtml'
Example #60
0
def leaderboard_json(request):
    """ Render a top-users JSON dump. """

    user = _get_user(request, request.matchdict.get('id'))

    leaderboard = request.db.session.query(
        m.Person, func.count(m.Person.assertions)).join(
            m.Assertion).order_by('count_1 desc').filter(
                m.Person.opt_out == False).group_by(m.Person.id).all()

    # Hackishly, but relatively cheaply get the rank of all users.
    # This is:
    # { <person object>:
    #   {
    #     'badges': <number of badges they have>,
    #     'rank': <their global rank>
    #   }
    # }
    user_to_rank = dict([[data[0], {
        'badges': data[1],
        'rank': idx + 1
    }] for idx, data in enumerate(leaderboard)])

    if user:
        idx = [i[0] for i in leaderboard].index(user)
        # Handle the case of leaderboard[-2:2] which will be [] always.
        if idx < 2:
            idx = 2
        leaderboard = leaderboard[(idx - 2):(idx + 3)]
    else:
        leaderboard = leaderboard[:25]

    ret = [
        {**user_to_rank[p[0]], 'nickname': p[0].nickname}
        for p in leaderboard
    ]

    return {'leaderboard': ret}