def party_counts(self):
    """Return per-party cosponsor counts for this bill as a list of
    (party name, count, party color) tuples.

    Prefers the party recorded directly on the cosponsorship; falls back
    to candidacy-based affiliation when any cosponsorship lacks a party.
    """
    # if the bill cosponsorships have party_ids, then use it.
    # Rows are (count, party name, party color); name/color may be None
    # because of the outer join.
    party_counts = db_session.query(func.count(distinct(Cosponsorship.person_id)))\
        .join(Cosponsorship.bill)\
        .filter(Bill.id == self.id)\
        .outerjoin(Cosponsorship.party)\
        .add_columns(Party.name, Party.color)\
        .group_by(Party.id)
    # Otherwise, use the most recent party affiliation of candidacy info.
    if any(party is None for _, party, _ in party_counts):
        # Rows here are already (party name, count, color).
        party_counts = db_session.query(Party.name, func.count(distinct(Person.id)), Party.color)\
            .join(Candidacy)\
            .join(Election)\
            .filter(Election.assembly_id == self.assembly_id)\
            .join(Person)\
            .join(Cosponsorship)\
            .join(Bill)\
            .filter(Bill.id == self.id)\
            .group_by(Party.id)
    else:
        # Reorder (count, name, color) -> (name, count, color) to match.
        party_counts = ((party, count, color) for count, party, color in party_counts)
    return list(party_counts)
def components_list(db, opsysrelease_ids, associate_ids=None):
    '''
    Returns a list of tuples consisting from component's id and component's name
    '''
    # Only components that have at least one report.
    sub = db.session.query(distinct(Report.component_id)).subquery()
    components_query = (db.session.query(
        OpSysComponent.id,
        OpSysComponent.name).filter(
            OpSysComponent.id.in_(sub)).order_by(OpSysComponent.name))
    if opsysrelease_ids:
        # Restrict to components belonging to the given OS releases.
        fsub = (db.session.query(distinct(
            OpSysReleaseComponent.components_id)).filter(
                OpSysReleaseComponent.opsysreleases_id.in_(opsysrelease_ids)))
        components_query = (components_query.filter(
            OpSysComponent.id.in_(fsub)))
    if associate_ids:
        # Restrict to components maintained by the given associates.
        # NOTE(review): "opsysreleasecompoents_id" is presumably the
        # (misspelled) attribute name as declared on the model -- confirm.
        fsub = (db.session.query(distinct(
            OpSysReleaseComponent.components_id)).filter(
                OpSysReleaseComponent.id.in_(
                    db.session.query(
                        OpSysReleaseComponentAssociate.opsysreleasecompoents_id
                    ).filter(
                        OpSysReleaseComponentAssociate.associatepeople_id.in_(
                            associate_ids))))))
        components_query = (components_query.filter(
            OpSysComponent.id.in_(fsub)))
    return components_query.all()
def _purge_filtered_data(instance: Recorder, session: Session) -> bool:
    """Remove filtered states and events that shouldn't be in the database.

    Returns True when no filtered data remains; False when a purge pass
    ran (so the caller can invoke this again until it returns True).
    """
    _LOGGER.debug("Cleanup filtered data")
    using_sqlite = instance.dialect_name == SupportedDialect.SQLITE
    # Check if excluded entity_ids are in database
    excluded_entity_ids: list[str] = [
        entity_id
        for (entity_id, ) in session.query(distinct(States.entity_id)).all()
        if not instance.entity_filter(entity_id)
    ]
    if len(excluded_entity_ids) > 0:
        _purge_filtered_states(instance, session, excluded_entity_ids, using_sqlite)
        return False
    # Check if excluded event_types are in database
    excluded_event_types: list[str] = [
        event_type
        for (event_type, ) in session.query(distinct(Events.event_type)).all()
        if event_type in instance.exclude_t
    ]
    if len(excluded_event_types) > 0:
        _purge_filtered_events(instance, session, excluded_event_types)
        return False
    return True
def components_list(db, opsysrelease_ids, associate_ids=None):
    '''
    Returns a list of tuples consisting from component's id and component's name
    '''
    # Only components that have at least one report.
    sub = db.session.query(distinct(Report.component_id)).subquery()
    components_query = (db.session.query(OpSysComponent.id, OpSysComponent.name)
                        .filter(OpSysComponent.id.in_(sub))
                        .order_by(OpSysComponent.name))
    if opsysrelease_ids:
        # Restrict to components belonging to the given OS releases.
        fsub = (db.session.query(
            distinct(OpSysReleaseComponent.components_id))
            .filter(OpSysReleaseComponent.opsysreleases_id.in_(
                opsysrelease_ids)))
        components_query = (components_query
                            .filter(OpSysComponent.id.in_(fsub)))
    if associate_ids:
        # Restrict to components maintained by the given associates.
        # NOTE(review): "opsysreleasecompoents_id" is presumably the
        # (misspelled) attribute name as declared on the model -- confirm.
        fsub = (db.session.query(
            distinct(OpSysReleaseComponent.components_id))
            .filter(OpSysReleaseComponent.id.in_(
                db.session.query(OpSysReleaseComponentAssociate.opsysreleasecompoents_id)
                .filter(OpSysReleaseComponentAssociate.associatepeople_id.in_(associate_ids)))))
        components_query = (components_query
                            .filter(OpSysComponent.id.in_(fsub)))
    return components_query.all()
def get_tips(session, tid):
    """Return a summary dict for every internal tip of tenant `tid`,
    including per-tip counts of comments, messages and files."""
    tips = []
    comments_by_itip = {}
    messages_by_itip = {}
    files_by_itip = {}

    # Fetch comments count
    for itip_id, count in session.query(models.InternalTip.id,
                                        func.count(distinct(models.Comment.id))) \
                                 .filter(models.Comment.internaltip_id == models.InternalTip.id,
                                         models.InternalTip.tid == tid) \
                                 .group_by(models.InternalTip.id):
        comments_by_itip[itip_id] = count

    # Fetch messages count (messages hang off receiver tips, hence the extra join)
    for itip_id, count in session.query(models.InternalTip.id,
                                        func.count(distinct(models.Message.id))) \
                                 .filter(models.Message.receivertip_id == models.ReceiverTip.id,
                                         models.ReceiverTip.internaltip_id == models.InternalTip.id,
                                         models.InternalTip.tid == tid) \
                                 .group_by(models.InternalTip.id):
        messages_by_itip[itip_id] = count

    # Fetch files count
    for itip_id, count in session.query(models.InternalTip.id,
                                        func.count(distinct(models.InternalFile.id))) \
                                 .filter(models.InternalFile.internaltip_id == models.InternalTip.id,
                                         models.InternalTip.tid == tid) \
                                 .group_by(models.InternalTip.id):
        files_by_itip[itip_id] = count

    # Tips with no comments/messages/files are absent from the count maps,
    # so default to 0 on lookup.
    for itip in session.query(
            models.InternalTip).filter(models.InternalTip.tid == tid):
        tips.append({
            'id': itip.id,
            'creation_date': itip.creation_date,
            'last_update': itip.update_date,
            'expiration_date': itip.expiration_date,
            'context_id': itip.context_id,
            'status': itip.status,
            'substatus': itip.substatus,
            'tor': not itip.https,
            'comments': comments_by_itip.get(itip.id, 0),
            'messages': messages_by_itip.get(itip.id, 0),
            'files': files_by_itip.get(itip.id, 0),
            'wb_last_access': itip.wb_last_access
        })

    return tips
def getrequests(cls):
    """Return the latest version of every non-archived FOI raw request.

    NOTE(review): this issues one query per request id (N+1); a grouped
    subquery or window function could fetch the same data in one round trip.
    """
    _session = db.session
    # Request ids that have an 'Archived' status in any version.
    _archivedrequestids = _session.query(distinct(
        FOIRawRequest.requestid)).filter(
            FOIRawRequest.status.in_(['Archived'])).all()
    _requestids = _session.query(distinct(FOIRawRequest.requestid)).filter(
        FOIRawRequest.requestid.notin_(_archivedrequestids)).all()
    requests = []
    for _requestid in _requestids:
        # Highest version row wins for each request id.
        request = _session.query(FOIRawRequest).filter(
            FOIRawRequest.requestid == _requestid).order_by(
                FOIRawRequest.version.desc()).first()
        requests.append(request)
    return requests
def get_recipient_users(self):
    """Collect the deduplicated set of users this email should go to:
    confirmed daily-schedule subscribers plus confirmed subscribers of the
    selected committees."""
    groups = []

    if self.daily_schedule_subscribers.data:
        log.info("Email recipients includes daily schedule subscribers")
        # `== True` / `!= None` are intentional SQLAlchemy column comparisons.
        groups.append(User.query
                      .options(
                          lazyload(User.organisation),
                          lazyload(User.committee_alerts),
                      )\
                      .filter(User.subscribe_daily_schedule == True)
                      .filter(User.confirmed_at != None)
                      .all())

    if self.committee_ids.data:
        log.info("Email recipients includes subscribers for these committees: %s" % self.committee_ids.data)
        user_ids = db.session\
            .query(distinct(user_committee_alerts.c.user_id))\
            .filter(user_committee_alerts.c.committee_id.in_(self.committee_ids.data))\
            .all()
        # Rows are 1-tuples; unwrap to plain ids.
        user_ids = [u[0] for u in user_ids]
        groups.append(User.query
                      .options(
                          lazyload(User.organisation),
                          lazyload(User.committee_alerts),
                      )\
                      .filter(User.id.in_(user_ids))
                      .filter(User.confirmed_at != None)
                      .all())

    # set() dedupes users appearing in both groups.
    return set(u for u in chain(*groups))
def get_allowed_hosts(session):
    """Return the distinct, enabled web-service hosts as UTF-8 byte strings."""
    rows = (session
            .query(distinct(AccessWebService.host))
            .filter(and_(AccessWebService.host != None,
                         AccessWebService.disable == 0))
            .all())
    # Rows are 1-tuples; unwrap and encode each host.
    return [host.encode('utf-8', 'ignore') for (host,) in rows]
def get_synthese_stat():
    """Yearly synthese statistics.

    Returns rows of (year, total observation count, distinct cd_ref count),
    grouped by year, optionally filtered by the taxonomy-related request
    parameters below.  A parameter is applied only when present and
    non-empty, matching the original repeated `if` checks.
    """
    params = request.args
    q = DB.session.query(
        label("year", func.date_part("year", VSynthese.date_min)),
        func.count(VSynthese.id_synthese),
        func.count(distinct(VSynthese.cd_ref)),
    ).group_by("year")
    # Each supported request parameter mapped to the column it filters on.
    param_columns = (
        ("selectedRegne", VSynthese.regne),
        ("selectedPhylum", VSynthese.phylum),
        ("selectedClasse", VSynthese.classe),
        ("selectedOrdre", VSynthese.ordre),
        ("selectedFamille", VSynthese.famille),
        ("selectedGroup2INPN", VSynthese.group2_inpn),
        ("selectedGroup1INPN", VSynthese.group1_inpn),
        ("taxon", VSynthese.cd_ref),
    )
    for name, column in param_columns:
        # Present and non-empty, same semantics as `name in params and params[name] != ""`.
        if params.get(name):
            q = q.filter(column == params[name])
    return q.all()
def get_irradiation_names(self):
    """Return the distinct irradiation names, newest-first.

    Returns an empty list when the table has no rows.
    """
    with self.session_ctx() as sess:
        rows = sess.query(distinct(IrradiationLevelTable.IrradBaseID)).all()
        if not rows:
            return rows
        # Rows are 1-tuples; unwrap then sort descending.
        return sorted((name for (name,) in rows), reverse=True)
def prepare_pre_pdf(tenant, state_code, batch_guid):
    '''
    prepare which state and district are pre-cached

    :param string tenant: name of the tenant
    :param string state_code: stateCode representing the state
    :param string batch_guid: batch GUID
    :rType: list
    :return: list of results containing student information used to generate pdf
    '''
    with EdCoreDBConnection(tenant=tenant) as connector:
        fact_asmt_outcome_vw = connector.get_table(Constants.FACT_ASMT_OUTCOME_VW)
        dim_asmt = connector.get_table(Constants.DIM_ASMT)
        # One row per distinct student with assessment/location context,
        # joined to the current assessment dimension record.
        query = select([distinct(fact_asmt_outcome_vw.c.student_id).label(Constants.STUDENT_ID),
                        dim_asmt.c.asmt_period_year.label(Constants.ASMT_PERIOD_YEAR),
                        fact_asmt_outcome_vw.c.date_taken.label(Constants.DATETAKEN),
                        dim_asmt.c.asmt_type.label(Constants.ASMT_TYPE),
                        fact_asmt_outcome_vw.c.district_id.label(Constants.DISTRICT_ID),
                        fact_asmt_outcome_vw.c.school_id.label(Constants.SCHOOL_ID),
                        fact_asmt_outcome_vw.c.asmt_grade.label(Constants.ASMT_GRADE)],
                       from_obj=[fact_asmt_outcome_vw
                                 .join(dim_asmt, and_(dim_asmt.c.asmt_rec_id == fact_asmt_outcome_vw.c.asmt_rec_id,
                                                      dim_asmt.c.rec_status == Constants.CURRENT))])
        # Limit to this state, this batch, and current fact records only.
        query = query.where(fact_asmt_outcome_vw.c.state_code == state_code)
        query = query.where(and_(fact_asmt_outcome_vw.c.batch_guid == batch_guid))
        query = query.where(and_(fact_asmt_outcome_vw.c.rec_status == Constants.CURRENT))
        results = connector.get_result(query)
    return results
def generate_query_for_summative_or_interim(connection, asmt_type, student_ids, asmt_year, date_taken):
    """Build (but do not execute) a select of distinct current assessment
    outcomes for the given students, assessment type and year, ordered by
    student then date taken; optionally restricted to one date_taken."""
    fact_table = connection.get_table(Constants.FACT_ASMT_OUTCOME_VW)
    dim_asmt = connection.get_table(Constants.DIM_ASMT)
    query = Select(
        [
            distinct(fact_table.c.student_id).label(Constants.STUDENT_ID),
            fact_table.c.state_code.label(Constants.STATE_CODE),
            dim_asmt.c.asmt_period_year.label(Constants.ASMT_PERIOD_YEAR),
            fact_table.c.date_taken.label(Constants.DATETAKEN),
            fact_table.c.district_id.label(Constants.DISTRICT_ID),
            fact_table.c.school_id.label(Constants.SCHOOL_ID),
            fact_table.c.asmt_grade.label(Constants.ASMT_GRADE)
        ],
        from_obj=[
            # Join constraints on the dimension restrict to the current
            # record of the requested assessment type and year.
            fact_table.join(
                dim_asmt,
                and_(dim_asmt.c.asmt_rec_id == fact_table.c.asmt_rec_id,
                     dim_asmt.c.rec_status == Constants.CURRENT,
                     dim_asmt.c.asmt_type == asmt_type,
                     dim_asmt.c.asmt_period_year == asmt_year))
        ])
    query = query.where(
        and_(fact_table.c.rec_status == Constants.CURRENT,
             fact_table.c.student_id.in_(student_ids)))
    query = query.order_by(fact_table.c.student_id, fact_table.c.date_taken)
    # Adding a where() after order_by() is valid for SQLAlchemy selects.
    if date_taken is not None:
        query = query.where(and_(fact_table.c.date_taken == date_taken))
    return query
def generate_query_for_iab(connection, student_ids, asmt_year):
    """Build (but do not execute) a select of distinct current interim
    assessment block outcomes for the given students and year."""
    fact_table = connection.get_table(Constants.FACT_BLOCK_ASMT_OUTCOME)
    dim_asmt = connection.get_table(Constants.DIM_ASMT)
    query = Select(
        [
            distinct(fact_table.c.student_id).label(Constants.STUDENT_ID),
            fact_table.c.state_code.label(Constants.STATE_CODE),
            dim_asmt.c.asmt_period_year.label(Constants.ASMT_PERIOD_YEAR),
            fact_table.c.district_id.label(Constants.DISTRICT_ID),
            fact_table.c.school_id.label(Constants.SCHOOL_ID)
        ],
        from_obj=[
            # Join constraints restrict to current IAB assessment records
            # for the requested year.
            fact_table.join(
                dim_asmt,
                and_(
                    dim_asmt.c.asmt_rec_id == fact_table.c.asmt_rec_id,
                    dim_asmt.c.rec_status == Constants.CURRENT,
                    dim_asmt.c.asmt_type == AssessmentType.INTERIM_ASSESSMENT_BLOCKS,
                    dim_asmt.c.asmt_period_year == asmt_year))
        ])
    query = query.where(
        and_(fact_table.c.rec_status == Constants.CURRENT,
             fact_table.c.student_id.in_(student_ids)))
    return query
def get_student_reg_academic_years(state_code, tenant=None):
    """Return the distinct academic years with student registrations for a
    state, in descending order."""
    with EdCoreDBConnection(tenant=tenant, state_code=state_code) as connection:
        student_reg = connection.get_table(Constants.STUDENT_REG)
        query = (select([distinct(student_reg.c.academic_year)
                         .label(Constants.ACADEMIC_YEAR)])
                 .where(student_reg.c.state_code == state_code)
                 .order_by(student_reg.c.academic_year.desc()))
        rows = connection.get_result(query)
        return [row[Constants.ACADEMIC_YEAR] for row in rows]
def count_predicts_features(self):
    """Build a query counting the distinct features linked to this model.

    Note: this returns the Query object itself; call .scalar() on the
    result to execute it and obtain the count.
    """
    from sqlalchemy.sql.expression import func, distinct
    feature = schema.feature
    feature_set = schema.feature_set
    query = self.session.query(func.count(distinct(feature.idfeature)))
    query = query.join(feature_set,
                       feature_set.idfeature_set == feature.idfeature_set)
    return query.filter(feature_set.idmodel == self.idmodel)
def filter_gene_names_present_in_database(gene_names_of_interest):
    """Return the subset of `gene_names_of_interest` NOT yet in the database.

    Queries the Gene table for names that already exist, removes them from
    the input set, and returns the remainder as a list.  Exceptions are
    logged and re-raised; the scoped session is always removed.
    """
    _session = db.create_scoped_session()

    _log.info(
        "Filtering gene names that are already present in the database ...")
    try:
        # Make sure the gene names are a set, so removal is O(1) and
        # duplicates collapse.
        gene_names_of_interest = set(gene_names_of_interest)
        n_gene_names = len(gene_names_of_interest)

        # Names already present in the database (rows are 1-tuples).
        already_present = {
            row[0]
            for row in _session.query(distinct(Gene.gene_name)).filter(
                Gene.gene_name.in_(gene_names_of_interest)).all()
        }
        # Set difference replaces the original per-name remove() loop.
        gene_names_of_interest -= already_present

        _log.info("Filtered '" + str(len(already_present)) + "' out of '" +
                  str(n_gene_names) +
                  "' gene names that are already present in the database ...")
        return list(gene_names_of_interest)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not caught; the error is still logged and re-raised.
        _log.error(traceback.format_exc())
        raise
    finally:
        # Close this session, thus all items are cleared and memory usage
        # is kept at a minimum.
        _session.remove()
def get_recipient_users(self):
    """Collect the deduplicated set of users this email should go to:
    daily-schedule subscribers plus subscribers of the selected committees.

    NOTE(review): unlike a sibling variant of this method, no
    `confirmed_at` filter is applied here -- confirm that is intentional.
    """
    groups = []

    if self.daily_schedule_subscribers.data:
        log.info("Email recipients includes daily schedule subscribers")
        # `== True` is an intentional SQLAlchemy column comparison.
        groups.append(User.query
                      .options(
                          lazyload(User.organisation),
                          lazyload(User.committee_alerts),
                      )\
                      .filter(User.subscribe_daily_schedule == True)
                      .all())

    if self.committee_ids.data:
        log.info(
            "Email recipients includes subscribers for these committees: %s"
            % self.committee_ids.data)
        user_ids = db.session\
            .query(distinct(user_committee_alerts.c.user_id))\
            .filter(user_committee_alerts.c.committee_id.in_(self.committee_ids.data))\
            .all()
        # Rows are 1-tuples; unwrap to plain ids.
        user_ids = [u[0] for u in user_ids]
        groups.append(User.query
                      .options(
                          lazyload(User.organisation),
                          lazyload(User.committee_alerts),
                      )\
                      .filter(User.id.in_(user_ids))
                      .all())

    # set() dedupes users appearing in both groups.
    return set(u for u in chain(*groups))
def get_select_for_state_view(dim_inst_hier, state_code):
    """Build a select of the distinct districts of a state, labelled as
    (id, name) pairs for the state view."""
    columns = [
        distinct(dim_inst_hier.c.district_id).label(Constants.ID),
        dim_inst_hier.c.district_name.label(Constants.NAME),
    ]
    return (select(columns, from_obj=[dim_inst_hier])
            .where(dim_inst_hier.c.state_code == state_code))
def request_db():
    """Pick two random distinct actor names and stash them in the web session.

    NOTE(review): func.rand() is MySQL-specific (PostgreSQL uses random());
    confirm the target database.
    """
    # queries for 2 random actor names in the database and saves them as session
    result_list = [
        p
        for (p, ) in app.session.query(distinct(Movies.actor_name)).order_by(
            func.rand()).limit(2)
    ]
    session['result_list'] = result_list
    app.session.close()
def find_existing(
    cls,
    *,
    session: Session,
    server: Server,
    channel_xid: int,
    size: int,
    seats: int,
    tags: List[Tag],
    system: str,
    power: Optional[int],
) -> Optional[Game]:
    """Find the most recently updated pending game in this channel that
    matches size/system, has exactly the required tags, still has at least
    `seats` open, and (when `power` is given) a compatible average power.

    Returns None when no such game exists.
    """
    guild_xid = server.guild_xid
    required_tag_ids = set(tag.id for tag in tags)
    select_filters = [
        Game.status == "pending",
        Game.guild_xid == guild_xid,
        Game.size == size,
        Game.channel_xid == channel_xid,
        Game.system == system,
    ]
    having_filters = [
        # Game must carry exactly the required tag count and have room.
        func.count(distinct(games_tags.c.tag_id)) == len(required_tag_ids),
        func.count(distinct(User.xid)) <= size - seats,
    ]
    # NOTE(review): `if power:` treats power == 0 as "no power given".
    if power:
        # Average power within +/- 1, and on the same side of the 7 cutoff.
        having_filters.append(
            between(func.avg(User.power), power - 1, power + 1))
        if power >= 7:
            having_filters.append(func.avg(User.power) >= 7)
        else:
            having_filters.append(func.avg(User.power) < 7)
    else:
        select_filters.append(User.power == None)
    # Inner query: candidate game ids satisfying all filters.
    inner = (session.query(Game.id).join(User, isouter=True).join(
        games_tags, isouter=True).filter(and_(*select_filters)).group_by(
            Game.id).having(and_(*having_filters)))
    tag_filters = []
    for tid in required_tag_ids:
        tag_filters.append(games_tags.c.tag_id == tid)
    # Outer query: re-check the tag count against only the required tags,
    # newest update first.
    outer = (session.query(Game).join(games_tags, isouter=True).filter(
        and_(Game.id.in_(inner), or_(*tag_filters))).group_by(Game.id).having(
            having_filters[0]).order_by(desc(Game.updated_at)))
    game: Optional[Game] = outer.first()
    return game
def get_select_for_school_view(fact_table, state_code, district_id, school_id,
                               asmtYear, asmtType, subject):
    """Build a select of the distinct current assessment grades for one
    school, year, assessment type and subject.

    The grade is projected twice, labelled as both id and name.
    """
    school_match = and_(fact_table.c.state_code == state_code,
                        fact_table.c.district_id == district_id,
                        fact_table.c.school_id == school_id,
                        fact_table.c.asmt_year == asmtYear,
                        fact_table.c.rec_status == 'C',
                        fact_table.c.asmt_type == asmtType,
                        fact_table.c.asmt_subject == subject)
    columns = [distinct(fact_table.c.asmt_grade).label(Constants.ID),
               fact_table.c.asmt_grade.label(Constants.NAME)]
    return select(columns, from_obj=[fact_table]).where(school_match)
def data(request):
    """Return a GeoJSON (or JSONP) feature collection of units under a root.

    Query params:
      geom  -- 'polygon' (default) or 'point': which geometry table to use
      srs   -- target spatial reference id (default 4326)
      root  -- id of the root unit whose descendants are returned
      depth -- hierarchy depth below root to traverse (default 1)
      level -- optional filter on Unit.level
      callback -- when present, wrap the output as a JSONP call
    """
    dbsession = DBSession()

    geom = request.params['geom'] if 'geom' in request.params else 'polygon'
    srs = int(request.params['srs']) if 'srs' in request.params else 4326
    root = int(request.params['root']) if 'root' in request.params else None
    depth = int(request.params['depth']) if 'depth' in request.params else 1
    level = int(request.params['level']) if 'level' in request.params else None

    # (Removed an unused `filter = []` local that shadowed the builtin.)

    # отбор по иерархии (selection by hierarchy): one Unit alias per level;
    # coalesce picks the deepest non-null id along the alias chain.
    p_coalesce = []
    t_child = []
    for l in range(depth):
        t_tab = aliased(Unit)
        p_coalesce.insert(0, t_tab.id)
        t_child.append(t_tab)

    q_child = dbsession.query(
        distinct(func.coalesce(*p_coalesce)).label('child_id')) \
        .filter(t_child[0].parent_id == root)
    # Chain each level to its parent level (level 0 carries the root filter).
    for l in range(1, depth):
        q_child = q_child.join((t_child[l], t_child[l - 1].child))

    child_id = [r.child_id for r in q_child.all()]

    # геометрия (geometry): choose the geometry source table.
    if geom == 'polygon':
        t_geom = UnitPolygon
    elif geom == 'point':
        t_geom = UnitPoint

    e_geom = func.st_astext(func.st_transform(func.st_setsrid(t_geom.geom, 4326), srs))
    q_unit = dbsession.query(Unit, e_geom.label('geom')) \
        .options(joinedload(Unit.protocol_o), joinedload(Unit.protocol_i)) \
        .filter(Unit.id.in_(child_id)) \
        .outerjoin((t_geom, t_geom.unit_id == Unit.id))
    if level:
        q_unit = q_unit.filter(Unit.level == level)

    features = []
    for record in q_unit.all():
        properties = dict(
            protocol_o=record.Unit.protocol_o.as_dict() if record.Unit.protocol_o else None,
            protocol_i=record.Unit.protocol_i.as_dict() if record.Unit.protocol_i else None,
            unit=record.Unit.as_dict())
        features.append(Feature(
            id=record.Unit.id,
            geometry=loads_wkt(record.geom) if record.geom else None,
            properties=properties))

    fcoll = dumps_geojson(FeatureCollection(features))
    if 'callback' in request.params:
        # JSONP wrapping for cross-domain consumers.
        return Response('%s(%s)' % (request.params['callback'], fcoll),
                        content_type="text/javascript")
    else:
        return Response(fcoll, content_type="application/json")
def get_user_languages(user_id):
    """Return the distinct languages of a user's visible phrases."""
    rows = db.session.query(distinct(Phrase.language)).filter(
        Phrase.user_id == user_id,
        Phrase.status == Phrase.Status.visible.value,
    )
    # Each row is a 1-tuple; unwrap into a plain list.
    return [language for (language,) in rows]
def get_years(model):
    """Return year statistics over VSynthese.date_min.

    :param model: "distinct" for the ordered list of distinct years, or
        "min-max" for a single (min year, max year) row.
    :raises ValueError: for any other value (previously this fell through
        to an UnboundLocalError on `q`).
    """
    if model == "distinct":
        q = DB.session.query(
            label("year", distinct(func.date_part("year", VSynthese.date_min)))
        ).order_by("year")
    elif model == "min-max":
        q = DB.session.query(
            func.min(func.date_part("year", VSynthese.date_min)),
            func.max(func.date_part("year", VSynthese.date_min)),
        )
    else:
        raise ValueError(
            "model must be 'distinct' or 'min-max', got %r" % (model,))
    return q.all()
def getEntriesFiltered(self, identifier, accessId, **options):
    '''
    @see: IACLPrototype.getEntriesFiltered

    Distinct filtered entry positions for the given access and ACL
    identifier, ordered by position.
    '''
    assert isinstance(accessId, int), 'Invalid access id %s' % accessId

    sql = self.session().query(distinct(EntryMapped.Position))
    sql = sql.join(self.EntryFilter).join(self.AclAccess).join(self.Acl)
    sql = sql.filter(self.AclAccess.accessId == accessId).filter(self.AclIdentifier == identifier)
    sql = sql.order_by(EntryMapped.Position)

    return iterateCollection(sql, **options)
def autocomplete_tags(self, all=False): text = request.GET.get('val', None) # filter warnings import warnings warnings.filterwarnings('ignore', module='pylons.decorators') if text: if all: query = meta.Session.query(expression.distinct(expression.func.lower(Tag.title))) else: query = meta.Session.query(expression.distinct(expression.func.lower(SimpleTag.title))) query = query.filter(or_(Tag.title_short.op('ILIKE')('%s%%' % text), Tag.title.op('ILIKE')('%s%%' % text))) results = [title for title in query.all()] return dict(values = results) return None
def mspectra(self, scanid, mslevel=None):
    """Returns dict with peaks of a scan

    Also returns the cutoff applied to the scan
    and mslevel, precursor.id (parent scan id) and precursor.mz

    scanid
        Scan identifier of scan of which to return the mspectra

    mslevel
        Ms level on which the scan must be. Optional.
        If scanid not on mslevel raises ScanNotFound
    """
    scanq = self.session.query(Scan).filter(Scan.scanid == scanid)
    if (mslevel is not None):
        scanq = scanq.filter(Scan.mslevel == mslevel)

    try:
        scan = scanq.one()
    except NoResultFound:
        raise ScanNotFound()

    # lvl1 scans use absolute cutoff, lvl>1 use ratio of basepeak as cutoff
    if (scan.mslevel == 1):
        cutoff = self.session.query(Run.ms_intensity_cutoff).scalar()
    else:
        rel_cutoff = Scan.basepeakintensity * Run.msms_intensity_cutoff
        q = self.session.query(rel_cutoff)
        cutoff = q.filter(Scan.scanid == scanid).scalar()

    peaks = []
    for peak in self.session.query(Peak).filter_by(scanid=scanid):
        peaks.append({
            'mz': peak.mz,
            'intensity': peak.intensity,
            'assigned_molid': peak.assigned_molid,
        })

    precursor = {'id': scan.precursorscanid, 'mz': scan.precursormz}

    response = {
        'peaks': peaks,
        'cutoff': cutoff,
        'mslevel': scan.mslevel,
        'precursor': precursor,
    }
    # Level-1 scans additionally report the distinct fragment m/z values.
    if (scan.mslevel == 1):
        fragments = []
        fragq = self.session.query(distinct(Fragment.mz).label('mz'))
        fragq = fragq.filter_by(scanid=scanid)
        for fragment in fragq:
            fragments.append({'mz': fragment.mz})
        response['fragments'] = fragments
    return response
def recent_codenames(cls, fullname):
    """Get a list of recent codenames used for 300x100 ads.

    The 300x100 ads get a codename that looks like "fullname_campaign".
    This function gets a list of recent campaigns.
    """
    time_points = get_time_points('day')
    # Both filter clauses combined in one call (implicit AND).
    query = Session.query(distinct(cls.codename).label("codename")).filter(
        cls.date.in_(time_points),
        cls.codename.startswith(fullname),
    )
    return [row.codename for row in query.all()]
def GetSquads(run_id=None):
    """Return the distinct squads of present teams.

    When run_id is given, only teams matching that run's size and present
    on the run's day (bitmask check) are considered.  Falsy squad values
    are dropped from the result.
    """
    conditions = (Team.table.c.present > 0)
    if run_id:
        run = Run.get_by(id=run_id)
        conditions = conditions & (Team.table.c.size == run.size) \
            & (Team.table.c.present.op('&')(1 << (run.day - 1)))
    rows = session.execute(
        select([distinct(Team.table.c.squad)], conditions)).fetchall()
    # Flatten the 1-column rows and keep only truthy squad values.
    return [squad for row in rows for squad in row if squad]
def getActionsRoot(self, identifier, **options):
    '''
    @see: IUserRbacService.getActions

    Root action paths reachable through the rights of the given rbac
    identifier; empty collection when the identifier is unknown.
    '''
    rbacId = self.findRbacId(identifier)
    if rbacId is None:
        return emptyCollection(**options)
    # Distinct action paths over all rights of this rbac category.
    sql = self.session().query(distinct(RightAction.actionPath))
    sql = sql.filter(RightAction.categoryId.in_(self.sqlRights(rbacId)))  # @UndefinedVariable
    return processCollection(listRootPaths(path for path, in sql.all()), **options)
def getEntriesFiltered(self, identifier, accessId, **options):
    '''
    @see: IACLPrototype.getEntriesFiltered

    Distinct filtered entry positions for the given access and ACL
    identifier, ordered by position.
    '''
    assert isinstance(accessId, int), 'Invalid access id %s' % accessId

    sql = self.session().query(distinct(EntryMapped.Position))
    sql = sql.join(self.EntryFilter).join(self.AclAccess).join(self.Acl)
    sql = sql.filter(self.AclAccess.accessId == accessId).filter(
        self.AclIdentifier == identifier)
    sql = sql.order_by(EntryMapped.Position)

    return iterateCollection(sql, **options)
def _setup_defaults(self):
    """setting up the defaults"""
    # fill the asset_types_comboBox with all the asset types from the db.
    # Materialize as a list: under Python 3 `map` returns an iterator, so
    # the original `not in` test would exhaust it and the later append /
    # addItems calls would fail (map objects have no append).
    all_types = [row[0] for row in db.query(distinct(Asset.type)).all()]

    # Always offer the configured default asset type.
    if conf.default_asset_type_name not in all_types:
        all_types.append(conf.default_asset_type_name)

    logger.debug('all_types: %s' % all_types)

    self.asset_types_comboBox.addItems(all_types)
def org_roles(self, org_id): SchemaModel = get_model('rbacscheme') #----check if the organization is a global organization Org = self.model.get(org_id) RoleModel = get_model('role') RPRModel = get_model('Role_Perm_Rel') if Org.rbacscheme: query = select([self.model.c.id, self.model.c.name, self.model.c.rbacscheme, SchemaModel.c.id.label('schema_id'), SchemaModel.c.name.label('schema_name')]).select_from(join(self.model.table, SchemaModel.table, self.model.c.rbacscheme == SchemaModel.c.id)).where(self.model.c.id == org_id) OrgObj = do_(query).fetchone() query = select([distinct(RoleModel.c.id), RoleModel.c.name]).select_from(join(RoleModel.table, RPRModel.table, RoleModel.c.id == RPRModel.c.role)).where(RPRModel.c.scheme == OrgObj.rbacscheme) else: #----global organization query = select([self.model.c.id, self.model.c.name, self.model.c.rbacscheme, SchemaModel.c.id.label('schema_id'), SchemaModel.c.name.label('schema_name')]).select_from(join(self.model.table, SchemaModel.table, self.model.c.id == SchemaModel.c.gorg)).where(self.model.c.id == org_id) OrgObj = do_(query).fetchone() query = select([distinct(RoleModel.c.id), RoleModel.c.name]).select_from(join(RoleModel.table, RPRModel.table, RoleModel.c.id == RPRModel.c.role)).where(RPRModel.c.scheme == OrgObj.schema_id) #----need to filter the rols which belone to this schema roleList = do_(query) roleDict = {} for s in roleList: roleDict[s.id] = s.name return {'orgid':org_id, 'orgname':OrgObj.name, 'schemaname':OrgObj.schema_name, 'schema':OrgObj.rbacscheme , 'schema_id':OrgObj.schema_id, 'roleDict':roleDict}
def mspectra(self, scanid, mslevel=None):
    """Returns dict with peaks of a scan

    Also returns the cutoff applied to the scan
    and mslevel, precursor.id (parent scan id) and precursor.mz

    scanid
        Scan identifier of scan of which to return the mspectra

    mslevel
        Ms level on which the scan must be. Optional.
        If scanid not on mslevel raises ScanNotFound
    """
    scanq = self.session.query(Scan).filter(Scan.scanid == scanid)
    if (mslevel is not None):
        scanq = scanq.filter(Scan.mslevel == mslevel)

    try:
        scan = scanq.one()
    except NoResultFound:
        raise ScanNotFound()

    # lvl1 scans use absolute cutoff, lvl>1 use ratio of basepeak as cutoff
    if (scan.mslevel == 1):
        cutoff = self.session.query(Run.ms_intensity_cutoff).scalar()
    else:
        rel_cutoff = Scan.basepeakintensity * Run.msms_intensity_cutoff
        q = self.session.query(rel_cutoff)
        cutoff = q.filter(Scan.scanid == scanid).scalar()

    peaks = []
    for peak in self.session.query(Peak).filter_by(scanid=scanid):
        peaks.append({
            'mz': peak.mz,
            'intensity': peak.intensity,
            'assigned_molid': peak.assigned_molid,
        })

    precursor = {'id': scan.precursorscanid, 'mz': scan.precursormz}

    response = {'peaks': peaks,
                'cutoff': cutoff,
                'mslevel': scan.mslevel,
                'precursor': precursor,
                }
    # Level-1 scans additionally report the distinct fragment m/z values.
    if (scan.mslevel == 1):
        fragments = []
        fragq = self.session.query(distinct(Fragment.mz).label('mz'))
        fragq = fragq.filter_by(scanid=scanid)
        for fragment in fragq:
            fragments.append({'mz': fragment.mz})
        response['fragments'] = fragments
    return response
def get_students(self, tenant, student_ids):
    '''
    Returns a query that gives a list of distinct student guids given that a list of student guids are supplied
    '''
    queries = []
    # Same shape of query against both outcome fact tables.
    for fact_table_name in (Constants.FACT_ASMT_OUTCOME_VW,
                            Constants.FACT_BLOCK_ASMT_OUTCOME):
        fact_table = self.connector.get_table(fact_table_name)
        criteria = and_(fact_table.c.rec_status == Constants.CURRENT,
                        fact_table.c.student_id.in_(student_ids))
        queries.append(
            select([distinct(fact_table.c.student_id)],
                   from_obj=[fact_table]).where(criteria))
    return queries
def party_counts(self):
    """Return the number of distinct cosponsors of this bill per party as
    (party name, count) tuples, using candidacy records for this age."""
    party_counts = db_session.query(Party.name, func.count(distinct(Person.id)))\
        .join(Candidacy)\
        .join(Election)\
        .filter(Election.age == self.age)\
        .join(Person)\
        .join(cosponsorship)\
        .join(Bill)\
        .filter(Bill.id == self.id)\
        .group_by(Party.id)
    return [(party, int(count)) for party, count in party_counts]
def getANDPosts(self, tags, limit=100, extra_items=None):
    """Return posts carrying ALL of the given tags (AND semantics),
    newest first, optionally limited and constrained by extra_items and
    the current board."""
    query = self.DBsession().query(Post).join(Post.tags)
    if extra_items:
        query = self._dict2ToQuery(query, extra_items)
    if self.board:
        query = query.filter(Post.board == self.board)
    query = (query.filter(Tag.name.in_(tags))
             .group_by(Post.image_id)
             # a post matches only if it carries every requested tag
             .having(func.count(distinct(Tag.name)) == len(tags))
             .order_by(Post.post_id.desc()))
    if limit:
        query = query.limit(limit)
    return query.all()
def party_counts(self):
    """Return the number of distinct cosponsors of this bill per party as
    (party name, count) tuples, using candidacy records for this assembly."""
    party_counts = db_session.query(Party.name, func.count(distinct(Person.id)))\
        .join(Candidacy)\
        .join(Election)\
        .filter(Election.assembly_id == self.assembly_id)\
        .join(Person)\
        .join(cosponsorship)\
        .join(Bill)\
        .filter(Bill.id == self.id)\
        .group_by(Party.id)
    return [(party, int(count)) for party, count in party_counts]
def count_post_viewers(
        self, start_date=None, end_date=None, as_agent=True):
    """Count distinct users who viewed posts in this discussion, optionally
    restricted to [start_date, end_date).

    NOTE(review): `as_agent` is accepted but not used in this body.
    """
    from .post import Post
    from .action import ViewPost
    query = self.db.query(
        func.count(distinct(ViewPost.actor_id))).join(Post).filter(
            Post.discussion_id == self.id)
    if start_date:
        query = query.filter(ViewPost.creation_date >= start_date)
    if end_date:
        query = query.filter(ViewPost.creation_date < end_date)
    # Single-row, single-column result: unwrap the count.
    return query.first()[0]
def prepare_pre_cache(tenant, state_code, batch_guid):
    '''
    prepare which state and district are pre-cached

    :param string tenant: name of the tenant
    :param string state_code: stateCode representing the state
    :param last_pre_cached: dateTime of the last precached
    :rType: list
    :return: list of results containing district guids
    '''
    with EdCoreDBConnection(tenant=tenant) as connector:
        fact_asmt_outcome_vw = connector.get_table(
            Constants.FACT_ASMT_OUTCOME_VW)
        fact_block_asmt_outcome = connector.get_table(
            Constants.FACT_BLOCK_ASMT_OUTCOME)
        # Distinct districts from the regular assessment outcomes.
        query_fao = select([
            distinct(fact_asmt_outcome_vw.c.district_id).label(
                Constants.DISTRICT_ID)
        ], from_obj=[fact_asmt_outcome_vw])
        query_fao = query_fao.where(
            fact_asmt_outcome_vw.c.state_code == state_code)
        query_fao = query_fao.where(
            and_(fact_asmt_outcome_vw.c.batch_guid == batch_guid))
        query_fao = query_fao.where(
            and_(fact_asmt_outcome_vw.c.rec_status == Constants.CURRENT))
        # Distinct districts from the block assessment outcomes.
        query_fbao = select([
            distinct(fact_block_asmt_outcome.c.district_id).label(
                Constants.DISTRICT_ID)
        ], from_obj=[fact_block_asmt_outcome])
        query_fbao = query_fbao.where(
            fact_block_asmt_outcome.c.state_code == state_code)
        query_fbao = query_fbao.where(
            and_(fact_block_asmt_outcome.c.batch_guid == batch_guid))
        query_fbao = query_fbao.where(
            and_(fact_block_asmt_outcome.c.rec_status == Constants.CURRENT))
        # UNION also dedupes districts appearing in both tables.
        results = connector.get_result(query_fao.union(query_fbao))
    return results
def count_post_viewers(
        self, start_date=None, end_date=None, as_agent=True):
    """Count distinct users who viewed posts in this discussion, optionally
    restricted to [start_date, end_date).

    NOTE(review): `as_agent` is accepted but not used in this body.
    """
    from .post import Post
    from .action import ViewPost
    from sqlalchemy.sql.expression import distinct
    query = self.db.query(
        func.count(distinct(ViewPost.actor_id))).join(Post).filter(
            Post.discussion_id == self.id)
    if start_date:
        query = query.filter(ViewPost.creation_date >= start_date)
    if end_date:
        query = query.filter(ViewPost.creation_date < end_date)
    # Single-row, single-column result: unwrap the count.
    return query.first()[0]
def prepare_pre_cache(public_tenant):
    '''
    prepare which state and district are pre-cached

    :param string tenant: name of the tenant
    :rType: list
    :return: list of results containing state_code
    '''
    with EdMigratePublicConnection(public_tenant) as connector:
        fact_asmt_outcome_vw = connector.get_table(Constants.FACT_ASMT_OUTCOME_VW)
        fact_block_asmt_outcome = connector.get_table(Constants.FACT_BLOCK_ASMT_OUTCOME)
        # Current (state, district, school) tuples from regular outcomes.
        query_fao = select([distinct(fact_asmt_outcome_vw.c.state_code).label(Constants.STATE_CODE),
                            fact_asmt_outcome_vw.c.district_id.label(Constants.DISTRICT_ID),
                            fact_asmt_outcome_vw.c.school_id.label(Constants.SCHOOL_ID)],
                           from_obj=[fact_asmt_outcome_vw])
        query_fao = query_fao.where(and_(fact_asmt_outcome_vw.c.rec_status == Constants.CURRENT))
        # Current (state, district, school) tuples from block outcomes.
        query_fbao = select([distinct(fact_block_asmt_outcome.c.state_code).label(Constants.STATE_CODE),
                             fact_block_asmt_outcome.c.district_id.label(Constants.DISTRICT_ID),
                             fact_block_asmt_outcome.c.school_id.label(Constants.SCHOOL_ID)],
                            from_obj=[fact_block_asmt_outcome])
        query_fbao = query_fbao.where(and_(fact_block_asmt_outcome.c.rec_status == Constants.CURRENT))
        # UNION dedupes; ordered for deterministic pre-cache traversal.
        query = query_fao.union(query_fbao).order_by(Constants.STATE_CODE, Constants.DISTRICT_ID, Constants.SCHOOL_ID)
        results = connector.get_result(query)
    return results
def prepare_pre_cache(tenant, state_code, batch_guid):
    '''
    prepare which state and district are pre-cached

    :param string tenant: name of the tenant
    :param string state_code: stateCode representing the state
    :param last_pre_cached: dateTime of the last precached
    :rType: list
    :return: list of results containing district guids
    '''
    with EdCoreDBConnection(tenant=tenant) as connector:
        fact_asmt_outcome_vw = connector.get_table(Constants.FACT_ASMT_OUTCOME_VW)
        fact_block_asmt_outcome = connector.get_table(Constants.FACT_BLOCK_ASMT_OUTCOME)
        # Distinct districts from the regular assessment outcomes.
        query_fao = select([distinct(fact_asmt_outcome_vw.c.district_id).label(Constants.DISTRICT_ID)],
                           from_obj=[fact_asmt_outcome_vw])
        query_fao = query_fao.where(fact_asmt_outcome_vw.c.state_code == state_code)
        query_fao = query_fao.where(and_(fact_asmt_outcome_vw.c.batch_guid == batch_guid))
        query_fao = query_fao.where(and_(fact_asmt_outcome_vw.c.rec_status == Constants.CURRENT))
        # Distinct districts from the block assessment outcomes.
        query_fbao = select([distinct(fact_block_asmt_outcome.c.district_id).label(Constants.DISTRICT_ID)],
                            from_obj=[fact_block_asmt_outcome])
        query_fbao = query_fbao.where(fact_block_asmt_outcome.c.state_code == state_code)
        query_fbao = query_fbao.where(and_(fact_block_asmt_outcome.c.batch_guid == batch_guid))
        query_fbao = query_fbao.where(and_(fact_block_asmt_outcome.c.rec_status == Constants.CURRENT))
        # UNION also dedupes districts appearing in both tables.
        results = connector.get_result(query_fao.union(query_fbao))
    return results
def gold_buyers_on(date):
    """Return the account ids with a revenue-generating gold purchase on *date*.

    Revenue-generating means: status not in the non-revenue set and a
    positive pennies amount, with the purchase timestamp converted to the
    local timezone before the day-boundary comparison.
    """
    NON_REVENUE_STATUSES = ("declined", "chargeback", "fudge")
    day_start = datetime.datetime.combine(date, datetime.time.min)
    day_end = datetime.datetime.combine(date, datetime.time.max)
    local_purchase_time = func.timezone(TIMEZONE.zone, gold_table.c.date)
    query = (
        select([distinct(gold_table.c.account_id)])
        .where(~gold_table.c.status.in_(NON_REVENUE_STATUSES))
        .where(local_purchase_time >= day_start)
        .where(local_purchase_time <= day_end)
        .where(gold_table.c.pennies > 0)
    )
    result = ENGINE.execute(query)
    return [int(account_id) for (account_id,) in result.fetchall() if account_id]
def getSubActions(self, identifier, parentPath, **options):
    '''
    @see: IUserRbacService.getSubActions
    '''
    assert isinstance(parentPath, str), 'Invalid parent path %s' % parentPath
    rbacId = self.findRbacId(identifier)
    if rbacId is None:
        return emptyCollection(**options)
    # Distinct action paths reachable through the rbac's rights, limited to
    # the subtree strictly below parentPath.
    pathsQuery = self.session().query(distinct(RightAction.actionPath))
    pathsQuery = pathsQuery.filter(RightAction.categoryId.in_(self.sqlRights(rbacId)))  # @UndefinedVariable
    pathsQuery = pathsQuery.filter(RightAction.actionPath.like('%s.%%' % parentPath))  # @UndefinedVariable
    paths = [path for path, in pathsQuery.all()]
    return processCollection(listRootPaths(paths, len(parentPath) + 1), **options)
def event_notification(event_id):
    """Sends a notification mail informing all attendees to bring their tickets with them.

    Distinct purchasing users of the event are scheduled in batches of
    twenty mails, each batch delayed a further 5 minutes, so delivery
    happens in short bursts instead of one spike.
    """
    blogger.info('sending notifications for event %s' % event_id)
    users = Session.query(distinct(User.id))\
        .join(Order, TicketOrder, TicketType, TicketTypeEventPartAssociation, EventPart, Event)\
        .filter(Event.id == event_id)\
        .filter(Order.status == PURCHASED)\
        .all()
    count = 0
    for user_id, in users:
        # BUGFIX: must be floor division. Under Python 3, `count / 20` is a
        # float, so every mail got a unique fractional batch number and the
        # 20-per-batch throttling never grouped anything.
        batchnr = count // 20  # twenty mails per batch, 5 minutes apart
        single_notification_mail.apply_async(args=[event_id, user_id], countdown=batchnr * 5 * 60)
        count += 1
def prepare_pre_cache(public_tenant):
    '''
    prepare which state and district are pre-cached

    :param string public_tenant: name of the tenant
    :rType: list
    :return: list of results containing state_code, district_id, school_id
    '''
    with EdMigratePublicConnection(public_tenant) as connector:
        fao = connector.get_table(Constants.FACT_ASMT_OUTCOME_VW)
        fbao = connector.get_table(Constants.FACT_BLOCK_ASMT_OUTCOME)
        # Current records of the regular assessment fact table.
        fao_select = select(
            [distinct(fao.c.state_code).label(Constants.STATE_CODE),
             fao.c.district_id.label(Constants.DISTRICT_ID),
             fao.c.school_id.label(Constants.SCHOOL_ID)],
            from_obj=[fao]
        ).where(and_(fao.c.rec_status == Constants.CURRENT))
        # Current records of the block assessment fact table.
        fbao_select = select(
            [distinct(fbao.c.state_code).label(Constants.STATE_CODE),
             fbao.c.district_id.label(Constants.DISTRICT_ID),
             fbao.c.school_id.label(Constants.SCHOOL_ID)],
            from_obj=[fbao]
        ).where(and_(fbao.c.rec_status == Constants.CURRENT))
        combined = fao_select.union(fbao_select).order_by(
            Constants.STATE_CODE, Constants.DISTRICT_ID, Constants.SCHOOL_ID)
        return connector.get_result(combined)
def generate_query_for_iab(connection, student_ids, asmt_year):
    """Build the IAB outcome query for the given students and year.

    Joins the block-assessment fact table with current DIM_ASMT rows of
    type INTERIM_ASSESSMENT_BLOCKS for *asmt_year*, returning distinct
    student/state/year/district/school columns for current fact records.
    """
    fact = connection.get_table(Constants.FACT_BLOCK_ASMT_OUTCOME)
    asmt = connection.get_table(Constants.DIM_ASMT)
    joined = fact.join(
        asmt,
        and_(asmt.c.asmt_rec_id == fact.c.asmt_rec_id,
             asmt.c.rec_status == Constants.CURRENT,
             asmt.c.asmt_type == AssessmentType.INTERIM_ASSESSMENT_BLOCKS,
             asmt.c.asmt_period_year == asmt_year))
    columns = [distinct(fact.c.student_id).label(Constants.STUDENT_ID),
               fact.c.state_code.label(Constants.STATE_CODE),
               asmt.c.asmt_period_year.label(Constants.ASMT_PERIOD_YEAR),
               fact.c.district_id.label(Constants.DISTRICT_ID),
               fact.c.school_id.label(Constants.SCHOOL_ID)]
    return Select(columns, from_obj=[joined]).where(
        and_(fact.c.rec_status == Constants.CURRENT,
             fact.c.student_id.in_(student_ids)))
def purge_entity_data(instance: Recorder, entity_filter: Callable[[str], bool]) -> bool:
    """Purge states and events of specified entities."""
    with session_scope(session=instance.get_session()) as session:  # type: ignore[misc]
        # Every distinct entity_id in the states table that the filter selects.
        matching_entity_ids: list[str] = [
            entity_id
            for (entity_id,) in session.query(distinct(States.entity_id)).all()
            if entity_filter(entity_id)
        ]
        _LOGGER.debug("Purging entity data for %s", matching_entity_ids)
        if not matching_entity_ids:
            return True
        # Purge a max of MAX_ROWS_TO_PURGE, based on the oldest states or events record
        _purge_filtered_states(instance, session, matching_entity_ids)
        _LOGGER.debug("Purging entity data hasn't fully completed yet")
        return False
def generate_query_for_summative_or_interim(connection, asmt_type, student_ids, asmt_year, date_taken):
    """Build the summative/interim outcome query for the given students.

    Joins the assessment fact view with current DIM_ASMT rows of the
    requested *asmt_type* and *asmt_year*; restricts to current fact
    records for *student_ids*, orders by student and date taken, and
    optionally pins a single *date_taken*.
    """
    fact = connection.get_table(Constants.FACT_ASMT_OUTCOME_VW)
    asmt = connection.get_table(Constants.DIM_ASMT)
    joined = fact.join(
        asmt,
        and_(asmt.c.asmt_rec_id == fact.c.asmt_rec_id,
             asmt.c.rec_status == Constants.CURRENT,
             asmt.c.asmt_type == asmt_type,
             asmt.c.asmt_period_year == asmt_year))
    columns = [distinct(fact.c.student_id).label(Constants.STUDENT_ID),
               fact.c.state_code.label(Constants.STATE_CODE),
               asmt.c.asmt_period_year.label(Constants.ASMT_PERIOD_YEAR),
               fact.c.date_taken.label(Constants.DATETAKEN),
               fact.c.district_id.label(Constants.DISTRICT_ID),
               fact.c.school_id.label(Constants.SCHOOL_ID),
               fact.c.asmt_grade.label(Constants.ASMT_GRADE)]
    query = Select(columns, from_obj=[joined])
    query = query.where(and_(fact.c.rec_status == Constants.CURRENT,
                             fact.c.student_id.in_(student_ids)))
    query = query.order_by(fact.c.student_id, fact.c.date_taken)
    if date_taken is not None:
        query = query.where(and_(fact.c.date_taken == date_taken))
    return query
def get_select_for_state_view(dim_inst_hier, state_code):
    """Select distinct (district id, district name) pairs for one state."""
    columns = [distinct(dim_inst_hier.c.district_id).label(Constants.ID),
               dim_inst_hier.c.district_name.label(Constants.NAME)]
    return select(columns, from_obj=[dim_inst_hier])\
        .where(dim_inst_hier.c.state_code == state_code)
def get_select_for_district_view(dim_inst_hier, state_code, district_id):
    """Select distinct (school id, school name) pairs for one district."""
    columns = [distinct(dim_inst_hier.c.school_id).label(Constants.ID),
               dim_inst_hier.c.school_name.label(Constants.NAME)]
    in_district = and_(dim_inst_hier.c.state_code == state_code,
                       dim_inst_hier.c.district_id == district_id)
    return select(columns, from_obj=[dim_inst_hier]).where(in_district)