def get_committee(self, args, page_num, per_page, committee_id, candidate_id):
    if committee_id is not None:
        committees = CommitteeDetail.query
        committees = committees.filter_by(**{'committee_id': committee_id})

    if candidate_id is not None:
        committees = CommitteeDetail.query.join(CandidateCommitteeLink).filter(
            CandidateCommitteeLink.candidate_id == candidate_id)

    for argname in ['designation', 'organization_type', 'committee_type']:
        if args.get(argname):
            if ',' in args[argname]:
                committees = committees.filter(
                    getattr(Committee, argname).in_(args[argname].split(',')))
            else:
                committees = committees.filter(
                    getattr(Committee, argname) == args[argname])

    # default year filtering
    if args.get('year') is None:
        earliest_year = int(sorted(default_year().split(','))[0])
        # still going or expired after the earliest year we are looking for
        committees = committees.filter(
            or_(extract('year', CommitteeDetail.expire_date) >= earliest_year,
                CommitteeDetail.expire_date == None))

    # Should this handle a list of years to make it consistent with /candidate ?
    elif args.get('year') and args['year'] != '*':
        # before expiration
        committees = committees.filter(
            or_(extract('year', CommitteeDetail.expire_date) >= int(args['year']),
                CommitteeDetail.expire_date == None))
        # after origination
        committees = committees.filter(
            extract('year', CommitteeDetail.original_registration_date) <= int(args['year']))

    count = committees.count()
    return count, committees.order_by(CommitteeDetail.name).paginate(
        page_num, per_page, False).items
def get_table_from_database(cls, db, name_or_id, session=None, d_vid=None):
    from databundles.orm import Table
    import sqlalchemy.orm.exc
    from sqlalchemy.sql import or_, and_

    if not name_or_id:
        raise ValueError("Got an invalid argument: {}".format(name_or_id))

    try:
        if d_vid:
            return (
                session.query(Table)
                .filter(
                    and_(
                        Table.d_vid == d_vid,
                        or_(Table.vid == name_or_id,
                            Table.id_ == name_or_id,
                            Table.name == name_or_id),
                    )
                )
                .one()
            )
        else:
            return (
                session.query(Table)
                .filter(or_(Table.vid == name_or_id,
                            Table.id_ == name_or_id,
                            Table.name == name_or_id))
                .one()
            )
    except sqlalchemy.orm.exc.NoResultFound:
        raise sqlalchemy.orm.exc.NoResultFound(
            "No table for name_or_id: {}".format(name_or_id))
def system_utilisation(system, start, end):
    retval = dict((k, datetime.timedelta(0)) for k in
                  ['recipe', 'manual', 'idle_automated', 'idle_manual',
                   'idle_broken', 'idle_removed'])
    if end <= system.date_added:
        return retval
    if start <= system.date_added:
        start = system.date_added
    status_durations = system.dyn_status_durations\
        .filter(and_(SystemStatusDuration.start_time < end,
                     or_(SystemStatusDuration.finish_time >= start,
                         SystemStatusDuration.finish_time == None)))\
        .order_by(SystemStatusDuration.start_time).all()
    reservations = system.dyn_reservations\
        .filter(and_(Reservation.start_time < end,
                     or_(Reservation.finish_time >= start,
                         Reservation.finish_time == None)))\
        .order_by(Reservation.start_time).all()
    prev_finish = start
    for reservation in reservations:
        # clamp reservation start and finish to be within the period
        clamped_res_start = max(reservation.start_time, start)
        clamped_res_finish = min(reservation.finish_time or end, end)
        # first, do the gap from the end of the previous reservation to the
        # start of this one
        update_status_durations_in_period(retval, status_durations,
                                          prev_finish, clamped_res_start)
        # now do this actual reservation
        retval[reservation.type] += clamped_res_finish - clamped_res_start
        prev_finish = clamped_res_finish
    # lastly, do the gap from the end of the last reservation to the end of the
    # reporting period
    update_status_durations_in_period(retval, status_durations, prev_finish, end)
    return retval
def get_tagged_addrs(user):
    """Generate a list of tagged addresses for a user"""
    query1 = Session.query(Message.to_address)
    query2 = Session.query(Message.from_address)
    addrs = [addr.address for addr in user.addresses
             if '+*' not in addr.address and '-*' not in addr.address]
    addrs.append(user.email)
    tagged_addrs = [addr.address for addr in user.addresses
                    if '+*' in addr.address or '-*' in addr.address]
    if tagged_addrs:
        tagged_opts1 = func._(or_(*[Message.to_address
                                    .like(TAGGED_RE.sub(r'\g<one>%', taddr))
                                    for taddr in tagged_addrs]))
        tagged_opts2 = func._(or_(*[Message.from_address
                                    .like(TAGGED_RE.sub(r'\g<one>%', taddr))
                                    for taddr in tagged_addrs]))
        query1 = query1.filter(func._(
            or_(tagged_opts1, Message.to_address.in_(addrs))))
        query2 = query2.filter(func._(
            or_(tagged_opts2, Message.from_address.in_(addrs))))
    else:
        query1 = query1.filter(Message.to_address.in_(addrs))
        query2 = query2.filter(Message.from_address.in_(addrs))
    query1 = query1.distinct()
    query2 = query2.distinct()
    to_addrs = [val.to_address for val in query1]
    from_addrs = [val.from_address for val in query2]
    all_addrs = set(to_addrs + from_addrs)
    return [str(crc32(val)) for val in all_addrs]
def test_single_query(self):
    search = Search(self.Donkey, "people", self.session)
    session = self.Donkey.Session()
    people_class = self.Donkey.get_class("people")
    email_class = self.Donkey.get_class("email")

    assert set(QueryFromStringParam(search, 'name < ?', pos_args=["popp02"])
               .add_conditions(base_query).all()).symmetric_difference(
        set(session.query(people_class.id)
            .filter(people_class.name < u"popp02").all())) == set()

    assert set(QueryFromStringParam(search, 'name < ? and email.email like ?',
                                    pos_args=["popp02", "popi%"])
               .add_conditions(base_query).all()).symmetric_difference(
        set(session.query(people_class.id).join(["email"])
            .filter(and_(people_class.name < u"popp02",
                         email_class.email.like(u"popi%"))).all())) == set()

    assert set(QueryFromStringParam(search, "name < ? and not email.email like ?",
                                    pos_args=["popp02", "popi%"])
               .add_conditions(base_query).all()).symmetric_difference(
        set(session.query(people_class.id).outerjoin(["email"])
            .filter(and_(people_class.name < u"popp02",
                         or_(email_class.email == None,
                             not_(email_class.email.like(u"popi%"))))).all())) == set()

    assert set(QueryFromStringParam(search, "name < ? or not email.email like ?",
                                    pos_args=["popp02", "popi%"])
               .add_conditions(base_query).all()).symmetric_difference(
        set(session.query(people_class.id).outerjoin(["email"])
            .filter(or_(people_class.name < u"popp02",
                        or_(email_class.email == None,
                            not_(email_class.email.like(u"popi%"))))).all())) == set()

    assert set(QueryFromStringParam(search, "not (name < ? or not email.email like ?) ",
                                    pos_args=["popp02", "popi%"])
               .add_conditions(base_query).all()).symmetric_difference(
        set(session.query(people_class.id).outerjoin(["email"])
            .filter(not_(or_(people_class.name < u"popp02",
                             or_(email_class.email == None,
                                 not_(email_class.email.like(u"popi%")))))).all())) == set()
def filter(self):
    "Set filters"
    if self.user.is_domain_admin:
        dquery = self.dbsession.query(Domain.name)\
            .join(downs, (oa, downs.c.organization_id == oa.c.organization_id))\
            .filter(Domain.status == True)\
            .filter(oa.c.user_id == self.user.id).all()
        domains = [domain.name for domain in dquery]
        if not domains:
            domains.append('xx')
        if self.direction and self.direction == 'in':
            self.query = self.query\
                .filter(self.model.to_domain.in_(domains))
        elif self.direction and self.direction == 'out':
            self.query = self.query\
                .filter(self.model.from_domain.in_(domains))
        else:
            self.query = self.query.filter(
                func._(or_(self.model.to_domain.in_(domains),
                           self.model.from_domain.in_(domains))))
    if self.user.is_peleb:
        addrs = [addr.address for addr in self.user.addresses]
        addrs.append(self.user.email)
        if self.direction and self.direction == 'in':
            self.query = self.query\
                .filter(self.model.to_address.in_(addrs))
        elif self.direction and self.direction == 'out':
            self.query = self.query\
                .filter(self.model.from_address.in_(addrs))
        else:
            self.query = self.query\
                .filter(func._(
                    or_(self.model.to_address.in_(addrs),
                        self.model.from_address.in_(addrs))))
    return self.query
def security_cb(r, feature, o):
    # we need both the "original" and "new" geometry to be
    # within the restriction area
    geom_attr, srid = self._get_geom_col_info(layer)
    geom_attr = getattr(o, geom_attr)
    geom = feature.geometry
    allowed = DBSession.query(func.count(RestrictionArea.id))
    allowed = allowed.join(RestrictionArea.roles)
    allowed = allowed.join(RestrictionArea.layers)
    allowed = allowed.filter(RestrictionArea.readwrite == True)
    allowed = allowed.filter(Role.id == self.request.user.role.id)
    allowed = allowed.filter(Layer.id == layer.id)
    none = None  # the only way I found to remove the pep8 warning
    allowed = allowed.filter(or_(
        RestrictionArea.area == none,
        RestrictionArea.area.gcontains(geom_attr)
    ))
    if geom and not isinstance(geom, geojson.geometry.Default):
        shape = asShape(geom)
        spatial_elt = WKBSpatialElement(buffer(shape.wkb), srid=srid)
        allowed = allowed.filter(or_(
            RestrictionArea.area == none,
            RestrictionArea.area.gcontains(spatial_elt)
        ))
    if allowed.scalar() == 0:
        raise HTTPForbidden()
def __init__(self, dbsession, user):
    self.dbsession = dbsession
    self.user = user
    self.query = self.dbsession.query(
        func.count(Message.id).label('total'),
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.nameinfected == 0,
                             Message.otherinfected == 0,
                             Message.spam == 0,
                             Message.highspam == 0), 1)],
                      else_=0)).label('clean'),
        func.sum(case([(Message.virusinfected > 0, 1)],
                      else_=0)).label('virii'),
        func.sum(case([(and_(Message.highspam == 0,
                             Message.spam == 0,
                             Message.virusinfected == 0,
                             or_(Message.nameinfected > 0,
                                 Message.otherinfected > 0)), 1)],
                      else_=0)).label('infected'),
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.otherinfected == 0,
                             Message.nameinfected == 0,
                             or_(Message.spam > 0,
                                 Message.highspam > 0)), 1)],
                      else_=0)).label('spam'),
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.otherinfected == 0,
                             Message.nameinfected == 0,
                             Message.spam > 0,
                             Message.highspam == 0), 1)],
                      else_=0)).label('lowspam'),
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.otherinfected == 0,
                             Message.nameinfected == 0,
                             Message.highspam > 0), 1)],
                      else_=0)).label('highspam'))\
        .filter(Message.timestamp.between(
            ustartday(self.user.timezone),
            uendday(self.user.timezone)))
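# The sum(case(...)) chain above is the standard one-pass conditional-count
# (pivot) idiom: each label counts rows matching one predicate in a single
# scan. A minimal self-contained sketch of the same idiom follows; note it
# uses the modern positional case((cond, value), ...) spelling of SQLAlchemy
# 1.4+/2.0, whereas the snippet above uses the legacy list form. The table and
# data here are hypothetical.
from sqlalchemy import (Column, Integer, MetaData, Table, case, create_engine,
                        func, insert, select)

engine = create_engine("sqlite://")
metadata = MetaData()
msgs = Table("msgs", metadata,
             Column("id", Integer, primary_key=True),
             Column("spam", Integer),
             Column("virus", Integer))
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(insert(msgs), [{"spam": 0, "virus": 0},
                                {"spam": 1, "virus": 0},
                                {"spam": 0, "virus": 1}])
    row = conn.execute(select(
        func.count(msgs.c.id).label("total"),
        # each sum(case(...)) counts the rows matching one predicate
        func.sum(case((msgs.c.spam > 0, 1), else_=0)).label("spam"),
        func.sum(case((msgs.c.virus > 0, 1), else_=0)).label("virus"),
    )).one()
    print(row)  # (3, 1, 1)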
def query_database(query, raven_client):
    macs = [lookup.mac for lookup in query.wifi]
    if not macs:  # pragma: no cover
        return []

    result = []
    today = util.utcnow().date()
    temp_blocked = today - TEMPORARY_BLOCKLIST_DURATION

    try:
        load_fields = ('lat', 'lon', 'radius')
        shards = defaultdict(list)
        for mac in macs:
            shards[WifiShard.shard_model(mac)].append(mac)

        for shard, shard_macs in shards.items():
            rows = (
                query.session.query(shard)
                .filter(shard.mac.in_(shard_macs))
                .filter(shard.lat.isnot(None))
                .filter(shard.lon.isnot(None))
                .filter(or_(
                    shard.block_count.is_(None),
                    shard.block_count < PERMANENT_BLOCKLIST_THRESHOLD))
                .filter(or_(
                    shard.block_last.is_(None),
                    shard.block_last < temp_blocked))
                .options(load_only(*load_fields))
            ).all()
            result.extend(list(rows))
    except Exception:
        raven_client.captureException()

    return result
def __init__(self, dbsession, user):
    self.dbsession = dbsession
    self.user = user
    self.query = self.dbsession.query(
        func.count(Message.id).label('total'),
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.nameinfected == 0,
                             Message.otherinfected == 0,
                             Message.spam == 0,
                             Message.highspam == 0), 1)],
                      else_=0)).label('clean'),
        func.sum(case([(Message.virusinfected > 0, 1)],
                      else_=0)).label('virii'),
        func.sum(case([(and_(Message.highspam == 0,
                             Message.spam == 0,
                             Message.virusinfected == 0,
                             or_(Message.nameinfected > 0,
                                 Message.otherinfected > 0)), 1)],
                      else_=0)).label('infected'),
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.otherinfected == 0,
                             Message.nameinfected == 0,
                             or_(Message.spam > 0,
                                 Message.highspam > 0)), 1)],
                      else_=0)).label('spam'),
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.otherinfected == 0,
                             Message.nameinfected == 0,
                             Message.spam > 0,
                             Message.highspam == 0), 1)],
                      else_=0)).label('lowspam'),
        func.sum(case([(and_(Message.virusinfected == 0,
                             Message.otherinfected == 0,
                             Message.nameinfected == 0,
                             Message.highspam > 0), 1)],
                      else_=0)).label('highspam'))\
        .filter(Message.date == now().date())
def by_title(cls, keyword, locale=None):
    """Search a music with its title and localized title.
    Whitespaces are ignored.
    """
    # Search for a music whose title exactly matches the keyword
    music = session.query(cls).filter(cls.title == keyword).first()
    if music:
        return music

    # Search for a music whose title includes the keyword
    reduced_keyword = keyword.replace(' ', '')
    reduced_title = func.replace(cls.title, ' ', '')
    music = session.query(cls).\
        filter(or_(
            cls.title.contains(keyword),
            reduced_title.contains(reduced_keyword))).\
        limit(1).first()
    if music:
        return music

    # Search for a music whose localized title includes the keyword
    if locale is None:
        return None
    reduced_text = func.replace(cls.Localization.text, ' ', '')
    music_data = session.query(cls.Localization).\
        filter(cls.Localization.locale == locale).\
        filter(or_(
            cls.Localization.text.contains(keyword),
            reduced_text.contains(reduced_keyword))).\
        limit(1).first()
    if music_data is not None:
        return cls.by(music_id=music_data.music_id)
def check_geometry(r, feature, o):
    # we need both the "original" and "new" geometry to be
    # within the restriction area
    geom_attr, srid = self._get_geom_col_info(layer)
    geom_attr = getattr(o, geom_attr)
    geom = feature.geometry
    allowed = DBSession.query(func.count(RestrictionArea.id))
    allowed = allowed.join(RestrictionArea.roles)
    allowed = allowed.join(RestrictionArea.layers)
    allowed = allowed.filter(RestrictionArea.readwrite.is_(True))
    allowed = allowed.filter(Role.id == self.request.user.role.id)
    allowed = allowed.filter(Layer.id == layer.id)
    allowed = allowed.filter(or_(
        RestrictionArea.area.is_(None),
        RestrictionArea.area.ST_Contains(geom_attr)
    ))
    spatial_elt = None
    if geom and not isinstance(geom, geojson.geometry.Default):
        shape = asShape(geom)
        spatial_elt = from_shape(shape, srid=srid)
        allowed = allowed.filter(or_(
            RestrictionArea.area.is_(None),
            RestrictionArea.area.ST_Contains(spatial_elt)
        ))
    if allowed.scalar() == 0:
        raise HTTPForbidden()

    # check if geometry is valid
    self._validate_geometry(spatial_elt)
def search():
    search_res = request.args.get('query')
    # strip characters that have special meaning in Postgres text-search queries
    for ch in ("'", "!", "(", ")", ":"):
        search_res = search_res.replace(ch, "")
    temp_val = search_res.split(" ")
    search_list = []
    for term in temp_val:
        if term.isdigit():
            search_data = conn.execute(select([my_cards]).where(or_(
                func.to_tsvector('english', my_cards.c.text).match(term, postgresql_regconfig='english'),
                func.to_tsvector('english', my_cards.c.name).match(term, postgresql_regconfig='english'),
                func.to_tsvector('english', my_cards.c.cardType).match(term, postgresql_regconfig='english'),
                func.to_tsvector('english', my_cards.c.subType).match(term, postgresql_regconfig='english'),
                func.to_tsvector('english', my_cards.c.family).match(term, postgresql_regconfig='english'),
                my_cards.c.attack == int(term),
                my_cards.c.defense == int(term))))
        else:
            search_data = conn.execute(select([my_cards]).where(or_(
                func.to_tsvector('english', my_cards.c.text).match(term, postgresql_regconfig='english'),
                func.to_tsvector('english', my_cards.c.name).match(term, postgresql_regconfig='english'),
                func.to_tsvector('english', my_cards.c.cardType).match(term, postgresql_regconfig='english'),
                func.to_tsvector('english', my_cards.c.subType).match(term, postgresql_regconfig='english'),
                func.to_tsvector('english', my_cards.c.family).match(term, postgresql_regconfig='english'))))
        search_list += format_list(search_data)
    return render_template('searchTemplate.html', search_data=search_list)
def get_by_date(
        cls, session, calendar, start_date, stop_date, full_day=None,
        no_recursive=False, name=None):
    """ Retrieve the list of meetings between two dates.
    We include the start date and exclude the stop date.

    :kwarg full_day: Can be True, False or None. True will restrict to only
        meetings which take up the full day. False will only select meetings
        which do not take the full day. None will not restrict.
        Defaults to None.
    :kwarg no_recursive: a boolean specifying whether the list of meetings
        returned should exclude recursive meetings. Defaults to False;
        if True, recursive meetings will be excluded.
    :kwarg name: Defaults to None; if set, the meetings returned will be
        filtered for this string in their name.
    """
    query = session.query(
        cls
    ).filter(
        Meeting.calendar == calendar
    ).filter(
        or_(
            and_(
                (Meeting.meeting_date >= start_date),
                (Meeting.meeting_date <= stop_date),
            ),
            and_(
                (Meeting.meeting_date_end >= start_date),
                (Meeting.meeting_date_end <= stop_date),
            ),
            and_(
                (Meeting.meeting_date <= start_date),
                (Meeting.meeting_date_end >= stop_date),
            ),
        )
    ).order_by(
        Meeting.meeting_date,
        Meeting.meeting_time_start,
        Meeting.meeting_name)

    if full_day is not None:
        query = query.filter(Meeting.full_day == full_day)

    if no_recursive:
        query = query.filter(Meeting.recursion_frequency == None)
    else:
        query = query.filter(
            or_(
                (Meeting.recursion_ends >= Meeting.meeting_date),
                (Meeting.recursion_frequency == None),
            )
        )

    if name:
        query = query.filter(
            Meeting.meeting_name.ilike('%%%s%%' % name)
        )

    return query.all()
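# The three-way or_() above enumerates every way a meeting can overlap the
# [start_date, stop_date] window: start inside, end inside, or spanning it.
# Assuming meeting_date <= meeting_date_end, this is logically equivalent to
# the classic two-comparison interval-overlap test, sketched here against the
# same model (a possible simplification, not what the original code does):
overlap = and_(
    Meeting.meeting_date <= stop_date,       # starts before the window closes
    Meeting.meeting_date_end >= start_date,  # ends after the window opens
)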
def list(self, datasets=None, locations=None, key='vid'):
    """
    :param datasets: If specified, must be a dict, which the internal dataset
        data will be put into.
    :return: vnames of the datasets in the library.
    """
    from ..orm import Dataset, Partition
    from .files import Files
    from sqlalchemy.sql import or_

    if datasets is None:
        datasets = {}

    q1 = (self.session.query(Dataset, Partition).join(Partition)
          .filter(Dataset.vid != ROOT_CONFIG_NAME_V))
    q2 = (self.session.query(Dataset)
          .filter(Dataset.vid != ROOT_CONFIG_NAME_V))

    if locations:
        if not isinstance(locations, (list, tuple)):
            locations = [locations]
        terms = [Dataset.location == location for location in locations]
        q1 = q1.filter(or_(*terms))
        q2 = q2.filter(or_(*terms))

    for d, p in (q1.all() + [(d, None) for d in q2.all()]):
        ck = getattr(d.identity, key)

        if ck not in datasets:
            dsid = d.identity
            datasets[ck] = dsid
        else:
            dsid = datasets[ck]

        # The dataset locations are linked to the identity locations
        dsid.locations.set(d.location)

        if p and (not datasets[ck].partitions
                  or p.vid not in datasets[ck].partitions):
            pident = p.identity
            pident.locations.set(d.location)
            datasets[ck].add_partition(pident)

        if d.location == Files.TYPE.SOURCE:
            files = Files(self)
            f = files.query.type(Files.TYPE.SOURCE).ref(dsid.vid).one_maybe
            if f:
                dsid.bundle_state = f.state

    return datasets
def createView(self):
    # make sure the relevant database tables exist!
    metadata = Base.metadata
    metadata.create_all(self.db.engine, checkfirst=True)
    # filter indexes
    catalog = self.env.catalog.index_catalog
    xmlindex_list = catalog.getIndexes(package_id='seismology',
                                       resourcetype_id='station')
    filter = ['network_id', 'location_id', 'station_id', 'channel_id',
              'latitude', 'longitude', 'start_datetime', 'end_datetime']
    xmlindex_list = [x for x in xmlindex_list if x.label in filter]
    if not xmlindex_list:
        return
    # build up query
    query, joins = catalog._createIndexView(xmlindex_list, compact=True)
    options = [
        sql.functions.max(WaveformChannel.endtime).label("latest_activity"),
        # XXX: not UTC!!!!
        (sql.func.now() -
         sql.functions.max(WaveformChannel.endtime)).label("latency"),
        (sql.literal_column("end_datetime.keyval") == None).label("active"),
        sql.func.random().label("random"),
        sql.func.GeomFromText(
            sql.text("'POINT(' || longitude.keyval || ' ' || " +
                     "latitude.keyval || ')', 4326")).label('geom')
    ]
    for option in options:
        query.append_column(option)
    oncl = WaveformChannel.network == sql.literal_column("network_id.keyval")
    oncl = sql.and_(
        oncl,
        WaveformChannel.station == sql.literal_column("station_id.keyval"))
    oncl = sql.and_(
        oncl,
        WaveformChannel.channel == sql.literal_column("channel_id.keyval"))
    oncl = sql.and_(oncl, sql.or_(
        WaveformChannel.location == sql.literal_column("location_id.keyval"),
        sql.and_(
            WaveformChannel.location == None,
            sql.literal_column("location_id.keyval") == None
        )))
    oncl = sql.and_(
        oncl,
        WaveformChannel.endtime > sql.literal_column("start_datetime.keyval"))
    oncl = sql.and_(oncl, sql.or_(
        WaveformChannel.endtime <= sql.literal_column("end_datetime.keyval"),
        sql.literal_column("end_datetime.keyval") == None
    ))
    #joins = joins.join("default_waveform_channels", onclause=oncl)
    query = query.select_from(joins).group_by(
        document_tab.c['id'],
        document_meta_tab.c['datetime'],
        sql.literal_column("station_id.keyval"),
        sql.literal_column("channel_id.keyval"),
        sql.literal_column("network_id.keyval"),
        sql.literal_column("location_id.keyval"),
        sql.literal_column("latitude.keyval"),
        sql.literal_column("longitude.keyval"),
        sql.literal_column("start_datetime.keyval"),
        sql.literal_column("end_datetime.keyval"))
    return util.compileStatement(query)
def between(column):
    return sql.and_(
        sql.or_(
            column >= sql.bindparam("start_date"),
            sql.bindparam("start_date") == None),
        sql.or_(
            column <= sql.bindparam("end_date"),
            sql.bindparam("end_date") == None),
    )
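# A usage sketch for the helper above ("events" is a hypothetical table; the
# helper itself only needs the `sql` module, imported here as in the snippet).
# Both bindparams are supplied at execution time; passing None for either
# bound makes its "== None" branch render as a true IS NULL test, which
# disables that side of the range:
import datetime
from sqlalchemy import Column, Date, MetaData, Table, create_engine, select, sql

engine = create_engine("sqlite://")
metadata = MetaData()
events = Table("events", metadata, Column("day", Date))
metadata.create_all(engine)

stmt = select(events).where(between(events.c.day))
with engine.connect() as conn:
    rows = conn.execute(stmt, {
        "start_date": datetime.date(2024, 1, 1),
        "end_date": None,  # open-ended upper bound
    }).fetchall()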
def query_overrides(request):
    db = request.db
    data = request.validated
    query = db.query(BuildrootOverride)

    expired = data.get('expired')
    if expired is not None:
        if expired:
            query = query.filter(BuildrootOverride.expired_date != None)
        else:
            query = query.filter(BuildrootOverride.expired_date == None)

    packages = data.get('packages')
    if packages is not None:
        query = query.join(BuildrootOverride.build).join(Build.package)
        query = query.filter(or_(*[Package.name == pkg.name for pkg in packages]))

    releases = data.get('releases')
    if releases is not None:
        query = query.join(BuildrootOverride.build).join(Build.release)
        query = query.filter(or_(*[Release.name == r.name for r in releases]))

    like = data.get('like')
    if like is not None:
        query = query.join(BuildrootOverride.build)
        query = query.filter(or_(*[
            Build.nvr.like('%%%s%%' % like)
        ]))

    submitter = data.get('user')
    if submitter is not None:
        query = query.filter(BuildrootOverride.submitter == submitter)

    query = query.order_by(BuildrootOverride.submission_date.desc())

    # We can't use ``query.count()`` here because it is naive with respect to
    # all the joins that we're doing above.
    count_query = query.with_labels().statement\
        .with_only_columns([func.count(distinct(BuildrootOverride.id))])\
        .order_by(None)
    total = db.execute(count_query).scalar()

    page = data.get('page')
    rows_per_page = data.get('rows_per_page')
    pages = int(math.ceil(total / float(rows_per_page)))
    query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page)

    return dict(
        overrides=query.all(),
        page=page,
        pages=pages,
        rows_per_page=rows_per_page,
        total=total,
        chrome=data.get('chrome'),
        display_user=data.get('display_user'),
    )
def make_sa_expression(self, search, root_table):
    self.search = search
    self.root_table = root_table
    if self.table:
        table = self.table
    else:
        table = root_table
    self.table_class = self.search.name_to_alias[table]
    database = self.search.database
    # FIXME This hack gets the search working again but
    # should be fixed further back in the code
    if self.search.aliased_name_path[table] == 'root':
        rtable = database[table]
    else:
        rtable = database[self.search.aliased_name_path[table].table]
    if self.field == "id":
        self.type = sa.Integer
    else:
        self.type = rtable.columns[self.field].type
    self.parsed_values = []
    self.parse_values()
    field = getattr(self.table_class, self.field)
    table_class = self.table_class
    val = self.parsed_values
    # the Python 2-only ``<>`` operator has been replaced by ``!=`` throughout
    if self.operator == "<":
        return or_(field < val[0], table_class.id == None)
    if self.operator == "<=":
        return or_(field <= val[0], table_class.id == None)
    if self.operator == "<>":
        return or_(field != val[0], table_class.id == None)
    if self.operator == "=":
        return and_(field == val[0], table_class.id != None)
    if self.operator == ">":
        return and_(field > val[0], table_class.id != None)
    if self.operator == ">=":
        return and_(field >= val[0], table_class.id != None)
    if self.operator == "in":
        return and_(field.in_(list(val)), table_class.id != None)
    if self.operator == "between":
        return and_(field.between(val[0], val[1]), table_class.id != None)
    if self.operator == "like":
        return and_(field.ilike(val[0]), table_class.id != None)
    if self.operator == "is" and val[0]:
        return field == None
    if self.operator == "is" and not val[0]:
        return field != None
def delete(self):
    db.session.delete(self)
    db.session.commit()
    # on deletion, unpublish a city if it no longer has any routes
    if not MapRelationship.query.filter(
            or_(MapRelationship.city_from == self.city_from,
                MapRelationship.city_to == self.city_from)).first():
        tmp_city = MapCity.query.filter_by(identify=self.city_from).first()
        tmp_city.xor_publish()
    if not MapRelationship.query.filter(
            or_(MapRelationship.city_from == self.city_to,
                MapRelationship.city_to == self.city_to)).first():
        tmp_city = MapCity.query.filter_by(identify=self.city_to).first()
        tmp_city.xor_publish()
def listed(cls, ip_address, scopes=None):
    scope_q = cls.scope == None
    if scopes is not None:
        scope_q = or_(scope_q, cls.scope.in_(scopes))
    return and_(
        scope_q,
        cls.active == True,
        cls.ip_address.op(">>=")(ip_address),
        or_(cls.active_until == None, cls.active_until >= func.now()),
    )
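# Usage sketch, assuming a hypothetical Blocklist model exposing listed() as
# above. This is PostgreSQL-specific: .op(">>=") renders the inet/cidr
# "contains or equals" operator, so a stored 10.0.0.0/8 network matches the
# single address 10.1.2.3; listed() additionally checks scope and expiry.
is_blocked = session.query(
    session.query(Blocklist)
    .filter(Blocklist.listed('10.1.2.3', scopes=['api']))
    .exists()
).scalar()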
def vacuum_zones(self, age_days=None):
    """
    Destroy zones older than age_days
    """
    self._begin_op()
    db_session = self.db_session
    db_query_slice = get_numeric_setting('db_query_slice', int)
    age_days_from_config = float(zone_cfg.get_row_exc(db_session,
                                                      key='zone_del_age'))
    if age_days_from_config <= 0 and age_days is None:
        age_days = get_numeric_setting('zone_del_off_age', float)
    elif age_days is None:
        age_days = age_days_from_config
    age_days = timedelta(days=age_days)
    count = 0
    # Clear old and nuked zones one by one
    id_query = db_session.query(ZoneSM.id_)\
        .filter(ZoneSM.state == ZSTATE_DELETED)\
        .filter(or_(ZoneSM.deleted_start == None,
                    (func.now() - ZoneSM.deleted_start) > age_days))\
        .filter(ZoneSM.zone_files == False)\
        .yield_per(db_query_slice)
    id_results = []
    for zone_id, in id_query:
        id_results.append(zone_id)
    for zone_id in id_results:
        try:
            zone_sm = db_session.query(ZoneSM)\
                .filter(ZoneSM.id_ == zone_id).one()
        except NoResultFound:
            continue
        if zone_sm.state != ZSTATE_DELETED:
            # Skip this if a customer has undeleted the zone in the mean time..
            continue
        db_session.delete(zone_sm)
        db_session.commit()
        count += 1
    # Finally, do the zone_sm destroy operation
    query = db_session.query(ZoneSM)\
        .filter(ZoneSM.state == ZSTATE_DELETED)\
        .filter(or_(ZoneSM.deleted_start == None,
                    (func.now() - ZoneSM.deleted_start) > age_days))
    for zone_sm in query:
        if zone_sm.state != ZSTATE_DELETED:
            # Skip this if a customer has undeleted the zone in the mean time..
            continue
        try:
            exec_zonesm(zone_sm, ZoneSMDoDestroy)
        except ZoneSmFailure:
            continue
        count += 1
    result = {'num_deleted': count}
    self._finish_op()
    return result
def snps_in_chrom_ranges(self, chrom, ranges):
    table = self.snp_table
    cols = table.c
    range_bins = [bin_ranges(start, end) for start, end in ranges]
    range_stmt = [
        and_(cols.bin.in_(bins),
             or_(cols.chromStart.between(start, end),
                 cols.chromEnd.between(start, end)))
        for bins, (start, end) in zip(range_bins, ranges)
    ]
    s = select([table], and_(cols.chrom == "chr%s" % chrom, or_(*range_stmt)))
    result = self.execute_statement(s)
    snps = result.fetchall()
    return snps
def query_users(request):
    db = request.db
    data = request.validated
    query = db.query(User)

    like = data.get('like')
    if like is not None:
        query = query.filter(or_(*[
            User.name.like('%%%s%%' % like)
        ]))

    name = data.get('name')
    if name is not None:
        query = query.filter(User.name.like(name))

    groups = data.get('groups')
    if groups is not None:
        query = query.join(User.groups)
        query = query.filter(or_(*[Group.id == grp.id for grp in groups]))

    updates = data.get('updates')
    if updates is not None:
        query = query.join(User.updates)
        args = \
            [Update.title == update.title for update in updates] + \
            [Update.alias == update.alias for update in updates]
        query = query.filter(or_(*args))

    packages = data.get('packages')
    if packages is not None:
        query = query.join(User.packages)
        query = query.filter(or_(*[Package.id == p.id for p in packages]))

    # We can't use ``query.count()`` here because it is naive with respect to
    # all the joins that we're doing above.
    count_query = query.with_labels().statement\
        .with_only_columns([func.count(distinct(User.id))])\
        .order_by(None)
    total = request.db.execute(count_query).scalar()

    page = data.get('page')
    rows_per_page = data.get('rows_per_page')
    pages = int(math.ceil(total / float(rows_per_page)))
    query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page)

    return dict(
        users=query.all(),
        page=page,
        pages=pages,
        rows_per_page=rows_per_page,
        total=total,
    )
def XY():
    at = analysis_tbl('i0', 'o2', whereclause=[  # basically filters
        (flights.c.YEAR > 2005)
        & or_(*[(flights.c.ORIGIN == aa) for aa in airports])  # python is awesome
        & or_(*[(flights.c.DEST == aa) for aa in airports])
    ])
    tbl = rsql(str(lq(at)), engine.raw_connection())
    tbl.dropna(subset=tbl.columns, how='any', inplace=True)
    yl = ['anon_1']
    xl = list(set(tbl.columns) - set(yl))
    x, y = tbl[xl], tbl[yl]
    return x, y.values.T[0]
def _job_get_next_by_action(session, now, action, max_retry):
    job_ref = (
        session.query(models.Job)
        .options(sa_orm.subqueryload("job_metadata"))
        .filter_by(action=action)
        .filter(models.Job.retry_count < max_retry)
        .filter(models.Job.hard_timeout > now)
        .filter(sa_sql.or_(~models.Job.status.in_(["DONE", "CANCELLED"]),
                           models.Job.status == None))
        .filter(sa_sql.or_(models.Job.worker_id == None,
                           models.Job.timeout <= now))
        .order_by(models.Job.updated_at.asc())
        .first()
    )
    return job_ref
def unban_user(self, username, user_id):
    session = self.sessionmaker()
    results = session.query(Ban)\
        .filter(Ban.Enabled == True)\
        .filter(or_(Ban.ExpirationDate == None,
                    Ban.ExpirationDate > datetime.datetime.now()))
    # materialize the query: a Query object is always truthy, so the list of
    # rows must be fetched before the emptiness check below can work
    results = results.filter(or_(Ban.Username == username.lower(),
                                 Ban.userID == user_id)).all()
    if results:
        for result in results:
            session.delete(result)
        session.commit()
        session.close()
        return 'Successfully unbanned %s.' % username
    else:
        session.close()
        return 'No matching bans for %s.' % username
def _job_get_next_by_action(session, now, action, max_retry):
    job_ref = session.query(models.Job)\
        .options(sa_orm.subqueryload('job_metadata'))\
        .filter_by(action=action)\
        .filter(models.Job.retry_count < max_retry)\
        .filter(models.Job.hard_timeout > now)\
        .filter(sa_sql.or_(~models.Job.status.in_(['DONE', 'CANCELLED']),
                           models.Job.status == None))\
        .filter(sa_sql.or_(models.Job.worker_id == None,
                           models.Job.timeout <= now))\
        .order_by(models.Job.created_at.asc())\
        .first()
    return job_ref
def convert_where(self, node):
    for item in node:
        to_not = item == "not"
        to_and = item == "and"
        to_or = item == "or"
        if any([to_not, to_and, to_or]):
            break
    if node.operator:
        if node.table:
            table_class = self.search.name_to_alias[node.table]
        else:
            table_class = self.search.name_to_alias[self.search.table]
        field = getattr(table_class, node.field)
        # the Python 2-only ``<>`` operator has been replaced by ``!=``
        if node.operator == "<":
            return or_(field < node.value, table_class.id == None)
        if node.operator == "<=":
            return or_(field <= node.value, table_class.id == None)
        if node.operator == "<>":
            return or_(field != node.value, table_class.id == None)
        if node.operator == "=":
            return and_(field == node.value, table_class.id != None)
        if node.operator == ">":
            return and_(field > node.value, table_class.id != None)
        if node.operator == ">=":
            return and_(field >= node.value, table_class.id != None)
        if node.operator == "in":
            return and_(field.in_(list(node.value)), table_class.id != None)
        if node.operator == "between":
            return and_(field.between(node.value, node.value2),
                        table_class.id != None)
        if node.operator == "like":
            return and_(field.ilike(node.value), table_class.id != None)
        if node.operator == "is":
            return field == None
        if node.operator == "not":
            return field != None
    if to_not:
        return not_(self.convert_where(node[1]))
    if to_or:
        ors = [self.convert_where(stat) for stat in node[0::2]]
        return or_(*ors)
    if to_and:
        ors = [self.convert_where(stat) for stat in node[0::2]]
        return and_(*ors)
    raise  # nothing matched: malformed where node
def get_feed_entries(self):
    max_count = 20
    entries = Session.query(BlogEntry)[:max_count]

    myor = or_()
    for friend in self.friends:
        myor.append(Spotlight.uid == friend.id)
    if len(myor) > 0:
        entries.extend(
            Session.query(Spotlight)
            .filter(and_(myor, Spotlight.active == True))
            .order_by(sql.desc(Spotlight.timestamp))[:max_count])

    commentor = or_()
    spotlightor = or_()
    for friend in self.friends:
        commentor.append(SpotlightComment.uid == friend.id)
        spotlightor.append(Spotlight.uid == friend.id)
    if len(commentor) > 0 and len(spotlightor) > 0:
        entries.extend(
            Session.query(SpotlightComment)
            .join((Spotlight, SpotlightComment.spotlight))
            .filter(and_(
                SpotlightComment.uid != session['userid'],
                or_(Spotlight.uid == session['userid'],
                    and_(commentor, spotlightor)),
                Spotlight.active == True))
            .order_by(sql.desc(SpotlightComment.timestamp))[:max_count])

    entries.extend(
        Session.query(Recommendation)
        .filter(and_(
            Recommendation.recommendeefbid == self.fbid,
            Recommendation.active == True))[:max_count])

    def sort_by_timestamp(x, y):
        if x.timestamp == None:
            if y.timestamp == None:
                return 0
            return 1
        elif y.timestamp == None:
            return -1
        elif x.timestamp > y.timestamp:
            return -1
        elif x.timestamp == y.timestamp:
            return 0
        else:
            return 1

    entries.sort(sort_by_timestamp)
    return entries[:max_count]
def read_one(self):
    from c2cgeoportal_commons.models.main import (  # pylint: disable=import-outside-toplevel
        Layer,
        RestrictionArea,
        Role,
    )

    set_common_headers(self.request, "layers", NO_CACHE)

    layer = self._get_layer_for_request()
    protocol = self._get_protocol_for_layer(layer)
    feature_id = self.request.matchdict.get("feature_id")
    feature = protocol.read(self.request, id=feature_id)
    if not isinstance(feature, Feature):
        return feature
    if layer.public:
        return feature
    if self.request.user is None:
        raise HTTPForbidden()
    geom = feature.geometry
    if not geom or isinstance(geom, geojson.geometry.Default):  # pragma: no cover
        return feature
    shape = asShape(geom)
    srid = self._get_geom_col_info(layer)[1]
    spatial_elt = from_shape(shape, srid=srid)
    allowed = models.DBSession.query(func.count(RestrictionArea.id))
    allowed = allowed.join(RestrictionArea.roles)
    allowed = allowed.join(RestrictionArea.layers)
    allowed = allowed.filter(Role.id.in_(get_roles_id(self.request)))
    allowed = allowed.filter(Layer.id == layer.id)
    allowed = allowed.filter(
        or_(RestrictionArea.area.is_(None),
            RestrictionArea.area.ST_Contains(spatial_elt)))
    if allowed.scalar() == 0:
        raise HTTPForbidden()
    return feature
def get_data_points(self, measurement_id, point_name):
    # t0 = time.time()
    sel = select([self.DataPoints]).where(
        and_(self.DataPoints.c.measurement_id == measurement_id,
             or_(self.DataPoints.c.point_name_long.like(point_name),
                 self.DataPoints.c.point_name_short.like(point_name))))\
        .order_by('point_measured')
    result = self.connection.execute(sel)
    # t1 = time.time()
    # print 'SELECT time', t1-t0, 's'
    data_points = []
    measurement_date = []
    point_unit_name = []
    point_order = []
    for row in result:
        sign, mantissa, exponent, bytecount = (
            row['point_sign'], row['point_mantissa'],
            row['point_exponent'], row['point_bytecount'])
        # np.float was a deprecated alias for the builtin float (removed in
        # NumPy 1.24), so the builtin is used directly
        data_points.append(float(mpmath.mpf((sign, mantissa, exponent, bytecount))))
        measurement_date.append(row['point_measured'])
        point_unit_name.append(row['point_unit_name'])
        point_order.append(row['point_order'])
    # t2 = time.time()
    # print 'DATA CONVERSION time', t2-t1, 's'
    # print 'TOTAL time', t2-t0, 's\n'
    return (np.array(data_points), np.array(point_order),
            np.array(measurement_date), np.array(point_unit_name))
def by_is(self, query: Query, operand: str, maybe_negate: ConditionTransform) -> Query:
    if operand == 'private':
        cond = column("flags").op("&")(UserMessage.flags.is_private.mask) != 0
        return query.where(maybe_negate(cond))
    elif operand == 'starred':
        cond = column("flags").op("&")(UserMessage.flags.starred.mask) != 0
        return query.where(maybe_negate(cond))
    elif operand == 'unread':
        cond = column("flags").op("&")(UserMessage.flags.read.mask) == 0
        return query.where(maybe_negate(cond))
    elif operand == 'mentioned':
        cond1 = column("flags").op("&")(UserMessage.flags.mentioned.mask) != 0
        cond2 = column("flags").op("&")(UserMessage.flags.wildcard_mentioned.mask) != 0
        cond = or_(cond1, cond2)
        return query.where(maybe_negate(cond))
    elif operand == 'alerted':
        cond = column("flags").op("&")(UserMessage.flags.has_alert_word.mask) != 0
        return query.where(maybe_negate(cond))
    raise BadNarrowOperator("unknown 'is' operand " + operand)
def check_geometry(_, feature, obj):
    del obj  # unused
    geom = feature.geometry
    if geom and not isinstance(geom, geojson.geometry.Default):
        shape = asShape(geom)
        srid = self._get_geom_col_info(layer)[1]
        spatial_elt = from_shape(shape, srid=srid)
        allowed = models.DBSession.query(func.count(RestrictionArea.id))
        allowed = allowed.join(RestrictionArea.roles)
        allowed = allowed.join(RestrictionArea.layers)
        allowed = allowed.filter(RestrictionArea.readwrite.is_(True))
        allowed = allowed.filter(
            Role.id.in_([r.id for r in self.request.user.roles]))
        allowed = allowed.filter(Layer.id == layer.id)
        allowed = allowed.filter(
            or_(RestrictionArea.area.is_(None),
                RestrictionArea.area.ST_Contains(spatial_elt)))
        if allowed.scalar() == 0:
            raise HTTPForbidden()

        # check if geometry is valid
        if self._get_validation_setting(layer):
            self._validate_geometry(spatial_elt)
def _prepare_uber_query_for_sprint(self, sprint, bugs):
    query = self.session.query
    uber_query = query(Client, Project, TimeEntry.ticket_id, User, Tracker,
                       TimeEntry.description, TimeEntry.date, TimeEntry.time)
    uber_query = uber_query.filter(TimeEntry.user_id == User.id) \
        .filter(TimeEntry.project_id == Project.id) \
        .filter(Project.tracker_id == Tracker.id) \
        .filter(Project.client_id == Client.id)
    uber_query = uber_query.filter(TimeEntry.date >= sprint.start) \
        .filter(TimeEntry.date <= sprint.end) \
        .filter(TimeEntry.deleted == False)
    if bugs:
        or_list = []
        for bug in bugs:
            or_list.append(and_(TimeEntry.ticket_id == bug.id,
                                TimeEntry.project_id == bug.project.id))
        uber_query = uber_query.filter(or_(*or_list))
    else:
        uber_query = uber_query.filter(TimeEntry.ticket_id.in_([]))
    uber_query = uber_query.order_by(Client.name, Project.name,
                                     TimeEntry.ticket_id, User.name)
    return uber_query
def GetUpdates(self, request, context):
    with session_scope() as session:
        results = (session.query(Message)
                   .join(GroupChatSubscription,
                         GroupChatSubscription.group_chat_id == Message.conversation_id)
                   .filter(GroupChatSubscription.user_id == context.user_id)
                   .filter(Message.time >= GroupChatSubscription.joined)
                   .filter(or_(Message.time <= GroupChatSubscription.left,
                               GroupChatSubscription.left == None))
                   .filter(Message.id > request.newest_message_id)
                   .order_by(Message.id.asc())
                   .limit(DEFAULT_PAGINATION_LENGTH + 1)
                   .all())

        return conversations_pb2.GetUpdatesRes(
            updates=[
                conversations_pb2.Update(
                    group_chat_id=message.conversation_id,
                    message=_message_to_pb(message),
                ) for message in sorted(results, key=lambda message: message.id)
                [:DEFAULT_PAGINATION_LENGTH]
            ],
            no_more=len(results) <= DEFAULT_PAGINATION_LENGTH,
        )
def get_relationships(self, with_package=None, type=None, active=True,
                      direction='both'):
    '''Returns relationships this package has.
    Keeps stored type/ordering (not from pov of self).'''
    assert direction in ('both', 'forward', 'reverse')
    if with_package:
        assert isinstance(with_package, Package)
    from ckan.model.package_relationship import PackageRelationship
    forward_filters = [PackageRelationship.subject == self]
    reverse_filters = [PackageRelationship.object == self]
    if with_package:
        forward_filters.append(PackageRelationship.object == with_package)
        reverse_filters.append(PackageRelationship.subject == with_package)
    if active:
        forward_filters.append(PackageRelationship.state == core.State.ACTIVE)
        reverse_filters.append(PackageRelationship.state == core.State.ACTIVE)
    if type:
        forward_filters.append(PackageRelationship.type == type)
        reverse_type = PackageRelationship.reverse_type(type)
        reverse_filters.append(PackageRelationship.type == reverse_type)
    q = meta.Session.query(PackageRelationship)
    if direction == 'both':
        q = q.filter(or_(
            and_(*forward_filters),
            and_(*reverse_filters),
        ))
    elif direction == 'forward':
        q = q.filter(and_(*forward_filters))
    elif direction == 'reverse':
        q = q.filter(and_(*reverse_filters))
    return q.all()
def get_breadth_first_task(project_id, user_id=None, user_ip=None,
                           external_uid=None, offset=0, limit=1, orderby='id',
                           desc=False, **kwargs):
    """Get a new task which has the least number of task runs."""
    project_query = session.query(Task.id).filter(Task.project_id == project_id,
                                                  Task.state != 'completed')
    if user_id and not user_ip and not external_uid:
        subquery = session.query(TaskRun.task_id).filter_by(project_id=project_id,
                                                            user_id=user_id)
    else:
        if not user_ip:  # pragma: no cover
            user_ip = '127.0.0.1'
        if user_ip and not external_uid:
            subquery = session.query(TaskRun.task_id).filter_by(project_id=project_id,
                                                                user_ip=user_ip)
        else:
            subquery = session.query(TaskRun.task_id).filter_by(project_id=project_id,
                                                                external_uid=external_uid)

    tmp = project_query.except_(subquery)
    query = session.query(Task, func.sum(Counter.n_task_runs).label('n_task_runs'))\
        .filter(Task.id == Counter.task_id)\
        .filter(Counter.task_id.in_(tmp))\
        .filter(or_(Task.expiration == None, Task.expiration > datetime.utcnow()))\
        .group_by(Task.id)\
        .order_by('n_task_runs ASC')
    query = _set_orderby_desc(query, orderby, desc)
    data = query.limit(limit).offset(offset).all()
    return _handle_tuples(data)
def get(self):
    # a state cannot equal two strings at once, so these conditions must be
    # AND-ed to exclude both hidden and locked challenges (the original
    # or_() here was a tautology that matched every challenge)
    chals = (Challenges.query.filter(
        and_(Challenges.state != "hidden", Challenges.state != "locked"))
        .order_by(Challenges.value).all())
    Model = get_model()

    solves_sub = (db.session.query(
        Solves.challenge_id,
        db.func.count(Solves.challenge_id).label("solves"))
        .join(Model, Solves.account_id == Model.id)
        .filter(Model.banned == False, Model.hidden == False)
        .group_by(Solves.challenge_id).subquery())

    solves = (db.session.query(
        solves_sub.columns.challenge_id,
        solves_sub.columns.solves,
        Challenges.name,
    ).join(Challenges, solves_sub.columns.challenge_id == Challenges.id).all())

    response = []
    has_solves = []

    for challenge_id, count, name in solves:
        challenge = {"id": challenge_id, "name": name, "solves": count}
        response.append(challenge)
        has_solves.append(challenge_id)

    for c in chals:
        if c.id not in has_solves:
            challenge = {"id": c.id, "name": c.name, "solves": 0}
            response.append(challenge)

    db.session.close()
    return {"success": True, "data": response}
def filter_date_range(table_columns, start_date=None, end_date=None):
    if not isinstance(table_columns, collections.abc.Iterable):
        table_columns = [table_columns]
    filters = []
    for c in table_columns:
        # TODO: depending on type of column format the start and end differently
        if 'DATE' in str(c.type) or 1 == 1:
            # guard against missing dates before formatting them
            start = start_date.strftime('%Y-%m-%d') if start_date else None
            end = end_date.strftime('%Y-%m-%d') if end_date else None
            if start and end:
                if start == end:
                    filters.append(c == start)
                else:
                    filters.append(c.between(start, end))
            elif start:
                filters.append(c >= start)
            elif end:
                filters.append(c <= end)
            else:
                filters.append(None)
    return or_(*filters)
def search(query, page=1, page_size=SEARCH_MAX_ORGANIZATIONS):
    '''Perform an organization search given a ``query``'''
    likes = ['%{0}%'.format(word) for word in query.split() if word]

    organizations = queries.organizations_and_counters()

    if likes:
        organizations = organizations.filter(
            or_(
                and_(*(Group.name.ilike(like) for like in likes)),
                and_(*(Group.title.ilike(like) for like in likes)),
                # GroupRevision.description.ilike(like),
            ))

    page = max(page, 1)
    total = organizations.count()
    start = (page - 1) * page_size
    end = start + page_size
    return 'organizations', {
        'results': organizations[start:end],
        'total': total,
        'page': page,
        'page_size': page_size,
        'total_pages': int(math.ceil(total / float(page_size))),
    }
def _build_where(table, where_rpn):
    '''
    Build the WHERE clause of a SELECT query.
    where_rpn (list): [column, value, operator, ...] => Reverse Polish Notation
    (e.g.) WHERE id <= 5 AND user LIKE "%admin%"
        => ["id", 5, "<=", "user", "%admin%", "like", "and"]
    '''
    # operator definitions
    op = {
        '<': lambda x, y: x < y,
        '<=': lambda x, y: x <= y,
        '>': lambda x, y: x > y,
        '>=': lambda x, y: x >= y,
        '=': lambda x, y: x == y,
        '!=': lambda x, y: x != y,
        'and': lambda x, y: and_(x, y),
        'or': lambda x, y: or_(x, y),
        'like': lambda x, y: x.like(y),
        'in': lambda x, y: tuple_(x).in_([(e, ) for e in y]),
    }
    # variable (column) definitions
    op.update(table.columns)
    # evaluate the RPN expression into a query clause
    return RPN(where_rpn, op)[0]
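# The RPN() helper called above is not shown in the snippet. A minimal sketch
# of what such an evaluator could look like (hypothetical implementation,
# assuming string tokens name either a binary operator or a column registered
# in `symbols`, and every other token is a literal operand):
def RPN(tokens, symbols):
    stack = []
    for token in tokens:
        mapped = symbols.get(token) if isinstance(token, str) else None
        if callable(mapped):
            # binary operator: pop two operands, push the combined expression
            y, x = stack.pop(), stack.pop()
            stack.append(mapped(x, y))
        elif mapped is not None:
            # named column: push the mapped Column object
            stack.append(mapped)
        else:
            # literal value (number, pattern string, list for "in", ...)
            stack.append(token)
    return stack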
def _image_member_find(context, session, image_id=None, member=None, status=None):
    query = session.query(models.ImageMember)
    query = query.filter_by(deleted=False)

    if not context.is_admin:
        query = query.join(models.Image)
        filters = [
            models.Image.owner == context.owner,
            models.ImageMember.member == context.owner,
        ]
        query = query.filter(sa_sql.or_(*filters))

    if image_id is not None:
        query = query.filter(models.ImageMember.image_id == image_id)
    if member is not None:
        query = query.filter(models.ImageMember.member == member)
    if status is not None:
        query = query.filter(models.ImageMember.status == status)

    return query.all()
def markers_chart(self):
    counts = {}

    # flagged
    query = self.filter(
        db.session.query(func.count(Document.id))
        .filter(Document.flagged == True))  # noqa
    counts['flagged'] = query.scalar()

    # with URL
    query = self.filter(
        db.session.query(func.count(Document.id)).filter(
            Document.url != None, Document.url != ''))  # noqa
    counts['with-url'] = query.scalar()

    # without URL
    query = self.filter(
        db.session.query(func.count(Document.id)).filter(
            or_(Document.url == None, Document.url == '')))  # noqa
    counts['without-url'] = query.scalar()

    # average people sources per document
    subq = self.filter(
        db.session
        .query(func.count(DocumentSource.doc_id).label('count'))
        .join(Document, DocumentSource.doc_id == Document.id)
        .filter(DocumentSource.quoted == 1)
        .group_by(DocumentSource.doc_id))\
        .subquery('cnt')
    n = float(
        db.session.query(func.avg(subq.c.count)).select_from(subq).scalar() or 0)
    counts['average-sources-per-document'] = round(n, 2)

    return {'values': counts}
def solves_per_chal():
    chals = Challenges.query\
        .filter(or_(Challenges.hidden != True, Challenges.hidden == None))\
        .order_by(Challenges.value)\
        .all()

    solves_sub = db.session.query(
        Solves.chalid,
        db.func.count(Solves.chalid).label('solves')
    )\
        .join(Teams, Solves.teamid == Teams.id) \
        .filter(Teams.banned == False) \
        .group_by(Solves.chalid).subquery()

    solves = db.session.query(
        solves_sub.columns.chalid,
        solves_sub.columns.solves,
        Challenges.name
    ) \
        .join(Challenges, solves_sub.columns.chalid == Challenges.id).all()

    data = {}
    if utils.hide_scores():
        for chal, count, name in solves:
            data[chal] = -1
        for c in chals:
            if c.id not in data:
                data[c.id] = -1
    else:
        for chal, count, name in solves:
            data[chal] = count
        for c in chals:
            if c.id not in data:
                data[c.id] = 0
    db.session.close()
    return jsonify(data)
async def list_books(user_id: str = None,
                     q: str = None,
                     sort_col: Optional[str] = "modified_at",
                     sort_order: Optional[str] = "desc") -> List[Book]:
    """
    Lists Books matching the provided parameters.
    """
    query = select([books, users.c.pseudonym.label('author')
                    ]).select_from(books.join(users))
    if user_id:
        query = query.where(books.c.user_id == user_id)
    if q:
        query = query.where(
            or_(books.c.title.ilike(f"%{q}%"),
                books.c.description.ilike(f"%{q}%")))
    try:
        query = query.order_by(
            getattr(getattr(books.c, sort_col), sort_order)())
    except AttributeError as ex:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid sort parameter '{ex}'.",
        )

    return await database.fetch_all(query)
def prune_user_sticker_placements(self, subject_id, user_id, maximum_stickers):
    try:
        subject_id = get_id_bytes(subject_id)
    #TODO narrow catch
    except:
        return
    try:
        user_id = get_id_bytes(user_id)
    #TODO narrow catch
    except:
        return
    placements = self.search_sticker_placements(
        filter={
            'subject_ids': subject_id,
            'user_ids': user_id,
        },
        sort='placement_time',
        order='desc',
    )
    conditions = []
    i = 0
    for placement in placements.values():
        i += 1
        if i < maximum_stickers:
            continue
        conditions.append(self.sticker_placements.c.id == placement.id_bytes)
    if not conditions:
        return
    statement = self.sticker_placements.delete().where(
        and_(
            self.sticker_placements.c.subject_id == subject_id,
            self.sticker_placements.c.user_id == user_id,
            or_(*conditions),
        )
    )
    self.connection.execute(statement)
def apply(self, query, value, alias=None):
    # NOT IN silently drops rows where the column is NULL, so an explicit
    # IS NULL branch is OR'ed in to keep them
    column = self.get_column(alias)
    return query.filter(or_(~column.in_(value), column == None))  # noqa: E711
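# A small self-contained demonstration of why the IS NULL branch is needed:
# in SQL, NULL NOT IN (...) evaluates to NULL, so NULL rows silently vanish
# from a plain NOT IN filter. (Hypothetical table, SQLite in-memory.)
from sqlalchemy import (Column, Integer, MetaData, Table, create_engine,
                        insert, or_, select)

engine = create_engine("sqlite://")
metadata = MetaData()
t = Table("t", metadata,
          Column("id", Integer, primary_key=True),
          Column("x", Integer))
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(insert(t), [{"x": 1}, {"x": 2}, {"x": None}])
    naive = conn.execute(select(t.c.id).where(~t.c.x.in_([1]))).scalars().all()
    fixed = conn.execute(
        select(t.c.id).where(or_(~t.c.x.in_([1]), t.c.x == None))  # noqa: E711
    ).scalars().all()
    print(naive)  # [2]     -- the NULL row is lost
    print(fixed)  # [2, 3]  -- the NULL row is kept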
def generate(self):
    "generate query"
    for filt in self.filters:
        attr = getattr(self.model, filt['field'])
        if filt['filter'] == '1':
            key = "%s_equal" % filt['field']
            expr = attr == filt['value']
            self._load_keys(key, expr)
        if filt['filter'] == '2':
            key = "%s_notequal" % filt['field']
            expr = attr != filt['value']
            self._load_keys(key, expr)
        if filt['filter'] == '3':
            key = "%s_greaterthan" % filt['field']
            expr = attr > filt['value']
            self._load_keys(key, expr)
        if filt['filter'] == '4':
            key = "%s_lessthan" % filt['field']
            expr = attr < filt['value']
            self._load_keys(key, expr)
        if filt['filter'] == '5':
            key = "%s_contains" % filt['field']
            expr = attr.ilike('%' + filt['value'] + '%')
            self._load_keys(key, expr)
        if filt['filter'] == '6':
            key = "%s_ncontains" % filt['field']
            expr = ~attr.ilike('%' + filt['value'] + '%')
            self._load_keys(key, expr)
        if filt['filter'] == '7':
            key = "%s_regex" % filt['field']
            expr = attr.op('regexp')(filt['value'])
            self._load_keys(key, expr)
        if filt['filter'] == '8':
            key = "%s_nregex" % filt['field']
            expr = ~attr.op('regexp')(filt['value'])
            self._load_keys(key, expr)
        if filt['filter'] == '9':
            key = "%s_isnull" % filt['field']
            expr = attr == None
            self._load_keys(key, expr)
        if filt['filter'] == '10':
            key = "%s_nisnull" % filt['field']
            expr = attr != None
            self._load_keys(key, expr)
        if filt['filter'] == '11':
            key = "%s_istrue" % filt['field']
            expr = attr > 0
            self._load_keys(key, expr)
        if filt['filter'] == '12':
            key = "%s_isfalse" % filt['field']
            expr = attr <= 0
            self._load_keys(key, expr)
    self.addclauses.extend(self.kwargs.values())
    if self.addclauses or self.orclauses:
        if self.addclauses and self.orclauses:
            orlist = [or_(*self.orclauses[key]) for key in self.orclauses]
            query = self.query.filter(and_(and_(*self.addclauses), *orlist))
        else:
            if self.addclauses:
                query = self.query.filter(and_(*self.addclauses))
            if self.orclauses:
                orlist = [or_(*self.orclauses[key]) for key in self.orclauses]
                query = self.query.filter(and_(*orlist))
        return query
    return self.query
def push(username, yes, **kwargs):
    """Push builds out to the repositories."""
    resume = kwargs.pop('resume')
    resume_all = False

    initialize_db(config)
    db_factory = transactional_session_maker()
    composes = []
    with db_factory() as session:
        if not resume and session.query(Compose).count():
            if yes:
                click.echo('Existing composes detected: {}. Resuming all.'.format(
                    ', '.join([str(c) for c in session.query(Compose).all()])))
            else:
                click.confirm(
                    'Existing composes detected: {}. Do you wish to resume them all?'.format(
                        ', '.join([str(c) for c in session.query(Compose).all()])),
                    abort=True)
            resume = True
            resume_all = True

        # If we're resuming a push
        if resume:
            for compose in session.query(Compose).all():
                if len(compose.updates) == 0:
                    # Compose objects can end up with 0 updates in them if the composer ejects all
                    # the updates in a compose for some reason. Composes with no updates cannot be
                    # serialized because their content_type property uses the content_type of the
                    # first update in the Compose. Additionally, it doesn't really make sense to go
                    # forward with running an empty Compose. It makes the most sense to delete them.
                    click.echo("{} has no updates. It is being removed.".format(compose))
                    session.delete(compose)
                    continue

                if not resume_all:
                    if yes:
                        click.echo('Resuming {}.'.format(compose))
                    elif not click.confirm('Resume {}?'.format(compose)):
                        continue

                # Reset the Compose's state and error message.
                compose.state = ComposeState.requested
                compose.error_message = ''

                composes.append(compose)
        else:
            updates = []
            # Accept both comma and space separated request list
            requests = kwargs['request'].replace(',', ' ').split(' ')
            requests = [UpdateRequest.from_string(val) for val in requests]

            query = session.query(Update).filter(Update.request.in_(requests))

            if kwargs.get('builds'):
                query = query.join(Update.builds)
                query = query.filter(
                    or_(*[Build.nvr == build for build in kwargs['builds'].split(',')]))

            if kwargs.get('updates'):
                query = query.filter(
                    or_(*[Update.alias == alias for alias in kwargs['updates'].split(',')]))

            query = _filter_releases(session, query, kwargs.get('releases'))

            for update in query.all():
                # Skip unsigned updates (this checks that all builds in the update are signed)
                update_sig_status(update)
                if not update.signed:
                    click.echo(
                        f'Warning: {update.get_title()} has unsigned builds and has been skipped')
                    continue

                updates.append(update)

            composes = Compose.from_updates(updates)
            for c in composes:
                session.add(c)

            # We need to flush so the database knows about the new Compose objects, so the
            # Compose.updates relationship will work properly. This is due to us overriding the
            # primaryjoin on the relationship between Composes and Updates.
            session.flush()

            # Now we need to refresh the composes so their updates property will not be empty.
            for compose in composes:
                session.refresh(compose)

        # Now we need to sort the composes so their security property can be used to prioritize
        # security updates. The security property relies on the updates property being
        # non-empty, so this must happen after the refresh above.
        composes = sorted(composes)

        for compose in composes:
            click.echo('\n\n===== {} =====\n'.format(compose))
            for update in compose.updates:
                click.echo(update.get_title())

        if composes:
            if yes:
                click.echo('\n\nPushing {:d} updates.'.format(
                    sum([len(c.updates) for c in composes])))
            else:
                click.confirm('\n\nPush these {:d} updates?'.format(
                    sum([len(c.updates) for c in composes])), abort=True)
            click.echo('\nLocking updates...')
        else:
            click.echo('\nThere are no updates to push.')

        composes = [c.__json__(composer=True) for c in composes]

    if composes:
        click.echo('\nSending composer.start message')
        bodhi.server.notifications.publish(composer_schemas.ComposerStartV1.from_dict(dict(
            api_version=2,
            composes=composes,
            resume=resume,
            agent=username)),
            force=True)
def filter_query(self, query):
    query = query.filter(
        Document.analysis_nature_id == self.analysis_nature_id.data)

    if self.cluster_id.data:
        query = query.join(ClusteredDocument)\
            .filter(ClusteredDocument.cluster_id == self.cluster_id.data)

    if self.medium_id.data:
        query = query.filter(Document.medium_id.in_(self.medium_id.data))

    if self.user_id.data:
        if self.user_id.data == '-':
            query = query.filter(
                or_(
                    Document.created_by_user_id == None,  # noqa
                    Document.checked_by_user_id == None))
        else:
            query = query.filter(
                or_(Document.created_by_user_id == self.user_id.data,
                    Document.checked_by_user_id == self.user_id.data))

    if self.country_id.data:
        query = query.filter(Document.country_id.in_(self.country_id.data))

    if self.created_from:
        query = query.filter(Document.created_at >= self.created_from)

    if self.created_to:
        query = query.filter(Document.created_at <= self.created_to)

    if self.published_from:
        query = query.filter(Document.published_at >= self.published_from)

    if self.published_to:
        query = query.filter(Document.published_at <= self.published_to)

    if self.source_person_id.data:
        query = query\
            .join(DocumentSource)\
            .filter(DocumentSource.person_id == self.source_person_id.data)

    if self.problems.data:
        for code in self.problems.data:
            query = DocumentAnalysisProblem.lookup(code).filter_query(query)

    if self.flagged.data:
        query = query.filter(Document.flagged == True)  # noqa

    if self.has_url.data == '1':
        query = query.filter(Document.url != None, Document.url != '')  # noqa
    elif self.has_url.data == '0':
        query = query.filter(or_(Document.url == None, Document.url == ''))  # noqa

    if self.q.data:
        # full text search
        query = query.filter(
            FullTextSearch(self.q.data, Document, FullTextMode.NATURAL))

    if self.tags.data:
        tags = set(f for f in re.split(r'\s*,\s*', self.tags.data) if f)
        for tag in tags:
            query = query.filter(Document.tags.contains(tag))

    return query
def bill_on_interval(period_start, period_stop, account_id=None):
    """
    Retrieve statistics for the given interval [``period_start``, ``period_stop``].
    ``account_id=None`` means all accounts.

    Example of the returned value:

    .. code-block:: python

        {
            1: [
                {
                    "name": "16",
                    "rtype": "nova/instance",
                    "created_at": "2011-01-02T00:00:00Z",
                    "destroyed_at": null,
                    "parent_id": null,
                    "cost": 0.0,
                    "id": 1
                },
                {
                    "name": null,
                    "rtype": "local_gb",
                    "created_at": "2011-01-02T00:00:00Z",
                    "destroyed_at": null,
                    "parent_id": 1,
                    "cost": 1200.0,
                    "id": 2
                },
                {
                    "name": null,
                    "rtype": "memory_mb",
                    "created_at": "2011-01-02T00:00:00Z",
                    "destroyed_at": null,
                    "parent_id": 1,
                    "cost": 380928.0,
                    "id": 3
                }
            ]
        }

    :returns: a dictionary where keys are account ids and values are billing lists.
    """
    result = (db.session.query(Segment, Resource)
              .join(Resource)
              .filter(Segment.begin_at < period_stop)
              .filter(or_(Segment.end_at > period_start, Segment.end_at == None)))
    if account_id:
        result = result.filter(Resource.account_id == account_id)
    retval = {}
    rsrc_by_id = {}
    now = datetime.utcnow()
    for segment, rsrc in result:
        # dict.has_key() was removed in Python 3; use the ``in`` operator
        if rsrc.account_id not in retval:
            retval[rsrc.account_id] = []
        try:
            rsrc_descr = rsrc_by_id[rsrc.id]
        except KeyError:
            rsrc_descr = {
                "id": rsrc.id,
                "created_at": None,
                "destroyed_at": None,
                "cost": 0.0,
                "parent_id": rsrc.parent_id,
                "name": rsrc.name,
                "rtype": rsrc.rtype,
            }
            retval[rsrc.account_id].append(rsrc_descr)
            rsrc_by_id[rsrc.id] = rsrc_descr
        begin_at = max(segment.begin_at, period_start)
        end_at = min(segment.end_at or now, period_stop)
        rsrc_descr["cost"] += utils.cost_add(segment.cost, begin_at, end_at)

    result = (db.session.query(
        Segment,
        func.min(Segment.begin_at).label("min_start"),
        func.max(Segment.begin_at).label("max_start"),
        func.max(Segment.end_at).label("max_stop"),
        Resource.id)
        .join(Resource)
        .group_by(Resource.id)
        .filter(Segment.begin_at < period_stop)
        .filter(or_(Segment.end_at > period_start, Segment.end_at == None)))
    if account_id:
        result = result.filter(Resource.account_id == account_id)
    for row in result:
        rsrc_descr = rsrc_by_id.get(row.id, None)
        if not rsrc_descr:
            continue
        rsrc_descr["created_at"] = row.min_start
        if row.max_stop is None or row.max_start < row.max_stop:
            rsrc_descr["destroyed_at"] = row.max_stop
    return retval
def bookmark_list():
    ''' Returns a list of bookmarks '''
    search_form = SearchForm(request.args)

    # Create the base query. After this query is created, we keep iterating
    # over it until we reach the desired level of filtering.
    query = Bookmark.query.filter_by(
        user=current_user.id, deleted=False).order_by(Bookmark.added_on.desc())

    # Get list of tags we'll be filtering by
    tag_args = request.values.getlist('tags')
    if len(tag_args) == 0:
        tag_args = None
    else:
        for tag in tag_args:
            # Check if any of the tags for the bookmark match up with tag
            query = query.filter(Bookmark.tags.any(tag))

    # This means that the search form has been used
    if search_form.query.data is not None:
        # Search query type can be either basic, full text, or url.
        # This is basic search, which searches in the bookmark titles and
        # descriptions.
        if search_form.parameter.data == 'basic':
            query = search(query, search_form.query.data,
                           vector=Bookmark.search_vector)
            # XXX is this safe?
            user_count = Bookmark.query.filter_by(
                user=current_user.id, deleted=False).count()
            # Postgres full text search seems to fail when using non-ASCII
            # characters. When the failure happens, all the bookmarks are
            # returned instead. We check if this has happened, and if it has,
            # we fall back to non-indexed search instead.
            if query.count() == user_count:
                query = query.filter(
                    or_(Bookmark.title.contains(search_form.query.data),
                        Bookmark.description.contains(search_form.query.data)))
        elif search_form.parameter.data == 'ft':
            # We will search over the entire contents of the page here
            query = search(query, search_form.query.data,
                           vector=Bookmark.fulltext_vector)
            user_count = Bookmark.query.filter_by(
                user=current_user.id, deleted=False).count()
            if query.count() == user_count:
                query = query.filter(
                    Bookmark.full_text.contains(search_form.query.data))
        # URL search lets you filter by domains or other parts of the url
        elif search_form.parameter.data == 'url':
            query = query.filter(
                Bookmark.main_url.contains(search_form.query.data))
        else:
            pass

    # Context view takes you to the page the bookmark with a specific id is
    # present on. Here the id is used to know which bookmark should be
    # highlighted.
    try:
        context_id = request.args['bid']
    except KeyError:
        context_id = 0

    # Pagination, defaulting to the first page
    page = request.args.get('page', 1, type=int)
    # Users are allowed to specify how many bookmarks they want per page
    bookmarks_per_page = User.query.get(current_user.id).bookmarks_per_page
    # Paginate the results of our query
    pagination = query.paginate(page, per_page=bookmarks_per_page,
                                error_out=False)
    delete_form = DeleteForm()
    return render_template("manager/bookmark_list.html",
                           pagination=pagination,
                           search_form=search_form,
                           delete_form=delete_form,
                           context_id=context_id,
                           tag_args=tag_args)
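The count-comparison fallback above is worth calling out: if the indexed search matched every one of the user's bookmarks, the view assumes the search vector choked (e.g. on non-ASCII input) and reverts to substring matching. A condensed sketch of the same heuristic with hypothetical names; `search` is assumed to be the sqlalchemy-searchable helper used by the view:

from sqlalchemy import or_

def search_with_fallback(base_query, term, vector, fallback_columns):
    # Hypothetical helper mirroring the heuristic above.
    candidate = search(base_query, term, vector=vector)
    if candidate.count() == base_query.count():
        # Indexed search degenerated into "match everything"; fall back
        # to unindexed substring matching over the given columns.
        return base_query.filter(
            or_(*[col.contains(term) for col in fallback_columns]))
    return candidate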
def saveProductGroup(self, code, name, buy_code, sell_code, edititer=None):
    msg = ""
    if code == "":
        msg += _("Group code should not be empty.\n")
    if name == "":
        msg += _("Group name should not be empty.\n")
    #TODO set default values for buyid & sellid if empty
    if msg != "":
        msgbox = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING,
                                   gtk.BUTTONS_CLOSE, msg)
        msgbox.set_title(_("Empty fields"))
        msgbox.run()
        msgbox.destroy()
        return False

    if edititer != None:
        pcode = unicode(self.treestore.get_value(edititer, 0))
        #pcode = utility.convertToLatin(pcode)
        query = config.db.session.query(ProductGroups).select_from(ProductGroups)
        group = query.filter(ProductGroups.code == pcode).first()
        gid = group.id

    #code = utility.convertToLatin(code)
    buy_code = utility.convertToLatin(buy_code)
    sell_code = utility.convertToLatin(sell_code)

    # Checks if the group name or code is repeated.
    query = config.db.session.query(ProductGroups).select_from(ProductGroups)
    query = query.filter(
        or_(ProductGroups.code == code, ProductGroups.name == name))
    if edititer != None:
        query = query.filter(ProductGroups.id != gid)
    result = query.all()

    msg = ""
    for grp in result:
        if grp.code == code:
            msg += _("A group with this code already exists.\n")
            break
        elif grp.name == name:
            msg += _("A group with this name already exists.\n")
            break

    # Check if buy_code & sell_code are valid.
    #TODO Check if buying subject is creditor/debtor, and so for selling one.
    query = config.db.session.query(Subject).select_from(Subject)
    buy_sub = query.filter(Subject.code == buy_code).first()
    if buy_sub == None:
        msg += _("Buying code is not valid.\n")
    query = config.db.session.query(Subject).select_from(Subject)
    sell_sub = query.filter(Subject.code == sell_code).first()
    if sell_sub == None:
        msg += _("Selling code is not valid.\n")

    if msg != "":
        msgbox = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR,
                                   gtk.BUTTONS_CLOSE, msg)
        msgbox.set_title(_("Invalid group properties"))
        msgbox.run()
        msgbox.destroy()
        return False

    if edititer == None:
        group = ProductGroups(code, name, buy_sub.id, sell_sub.id)
        edititer = self.treestore.append(None)
        path = self.treestore.get_path(edititer)
        self.treeview.scroll_to_cell(path, None, False, 0, 0)
        self.treeview.set_cursor(path, None, False)
    else:
        group.code = code
        group.name = name
        group.buyId = buy_sub.id
        group.sellId = sell_sub.id

    config.db.session.add(group)
    config.db.session.commit()

    if config.digittype == 1:
        #code = utility.convertToPersian(code)
        buy_code = utility.convertToPersian(buy_code)
        sell_code = utility.convertToPersian(sell_code)

    self.saveRow(edititer, (code, name, buy_code, sell_code))
    return True
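The duplicate check above is a common pattern: one `or_` query finds any row that collides on either unique field, and when editing, the current row is excluded by primary key so a record never collides with itself. A minimal sketch, assuming the same `ProductGroups` model and a hypothetical session:

from sqlalchemy import or_

def find_conflicts(session, code, name, exclude_id=None):
    # Rows colliding on either unique column...
    query = session.query(ProductGroups).filter(
        or_(ProductGroups.code == code, ProductGroups.name == name))
    # ...except the row currently being edited, if any.
    if exclude_id is not None:
        query = query.filter(ProductGroups.id != exclude_id)
    return query.all()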
def find_all_in_period(self, params):
    with self.new_session() as session:
        query = session.query(CallLogSchema)
        query = query.options(joinedload('participants'))

        if params.get('start'):
            query = query.filter(CallLogSchema.date >= params['start'])
        if params.get('end'):
            query = query.filter(CallLogSchema.date < params['end'])

        if params.get('search'):
            filters = (sql.cast(column, sa.String)
                       .ilike('%%%s%%' % params['search'])
                       for column in self.searched_columns)
            query = query.filter(sql.or_(*filters))

        if params.get('user_uuids'):
            filters = (CallLogSchema.participant_user_uuids.contains(str(user_uuid))
                       for user_uuid in params['user_uuids'])
            query = query.filter(sql.or_(*filters))

        if params.get('call_direction'):
            query = query.filter(
                CallLogSchema.direction == params['call_direction'])

        if params.get('number'):
            sql_regex = params['number'].replace('_', '%')
            filters = (sql.cast(column, sa.String)
                       .like('%s' % sql_regex)
                       for column in [CallLogSchema.source_exten,
                                      CallLogSchema.destination_exten])
            query = query.filter(sql.or_(*filters))

        for tag in params.get('tags', []):
            query = query.filter(CallLogSchema.participants.any(
                CallLogParticipant.tags.contains(sql.cast([tag], ARRAY(sa.String)))
            ))

        order_field = None
        if params.get('order'):
            if params['order'] == 'marshmallow_duration':
                order_field = CallLogSchema.date_end - CallLogSchema.date_answer
            elif params['order'] == 'marshmallow_answered':
                order_field = CallLogSchema.date_answer
            else:
                order_field = getattr(CallLogSchema, params['order'])
        # only apply the sort direction when there is a field to sort on
        if order_field is not None and params.get('direction') == 'desc':
            order_field = order_field.desc()
        if order_field is not None:
            query = query.order_by(order_field)

        if params.get('limit'):
            query = query.limit(params['limit'])
        if params.get('offset'):
            query = query.offset(params['offset'])

        call_log_rows = query.all()

        if not call_log_rows:
            return []

        for call_log in call_log_rows:
            make_transient(call_log)
            for participant in call_log.participants:
                make_transient(participant)

        return call_log_rows
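Both the `search` and `number` branches build a sequence of per-column predicates and collapse them with `sql.or_(*filters)`. A standalone sketch of that idiom, with hypothetical `columns` and `term` arguments:

import sqlalchemy as sa
from sqlalchemy import sql

def ilike_any(query, columns, term):
    # Case-insensitive substring match against any of the given columns;
    # casting to String lets non-text columns participate too.
    filters = (sa.cast(column, sa.String).ilike('%%%s%%' % term)
               for column in columns)
    return query.filter(sql.or_(*filters))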
def process_send_message_notifications(payload):
    """
    Sends out email notifications for messages that have been unseen for a
    long enough time.
    """
    # very crude and dumb algorithm
    logger.info("Sending out email notifications for unseen messages")

    with session_scope() as session:
        # users who have unnotified messages older than 5 minutes in any group chat
        users = (
            session.query(User)
            .join(GroupChatSubscription,
                  GroupChatSubscription.user_id == User.id)
            .join(Message,
                  Message.conversation_id == GroupChatSubscription.group_chat_id)
            .filter(Message.time >= GroupChatSubscription.joined)
            .filter(or_(Message.time <= GroupChatSubscription.left,
                        GroupChatSubscription.left == None))
            .filter(Message.id > User.last_notified_message_id)
            .filter(Message.id > GroupChatSubscription.last_seen_message_id)
            .filter(Message.time < now() - timedelta(minutes=5))
            .filter(Message.message_type == MessageType.text)  # TODO: only text messages for now
            .all())

        for user in users:
            # now actually grab all the group chats, not just less than 5 min old
            subquery = (
                session.query(
                    GroupChatSubscription.group_chat_id.label("group_chat_id"),
                    func.max(GroupChatSubscription.id).label("group_chat_subscriptions_id"),
                    func.max(Message.id).label("message_id"),
                    func.count(Message.id).label("count_unseen"),
                )
                .join(Message,
                      Message.conversation_id == GroupChatSubscription.group_chat_id)
                .filter(GroupChatSubscription.user_id == user.id)
                .filter(Message.id > user.last_notified_message_id)
                .filter(Message.id > GroupChatSubscription.last_seen_message_id)
                .filter(Message.time >= GroupChatSubscription.joined)
                .filter(Message.message_type == MessageType.text)  # TODO: only text messages for now
                .filter(or_(Message.time <= GroupChatSubscription.left,
                            GroupChatSubscription.left == None))
                .group_by(GroupChatSubscription.group_chat_id)
                .order_by(func.max(Message.id).desc())
                .subquery())

            unseen_messages = (
                session.query(GroupChat, Message, subquery.c.count_unseen)
                .join(subquery, subquery.c.message_id == Message.id)
                .join(GroupChat,
                      GroupChat.conversation_id == subquery.c.group_chat_id)
                .order_by(subquery.c.message_id.desc())
                .all())

            user.last_notified_message_id = max(
                message.id for _, message, _ in unseen_messages)
            session.commit()

            total_unseen_message_count = sum(
                count for _, _, count in unseen_messages)

            email.enqueue_email_from_template(
                user.email,
                "unseen_messages",
                template_args={
                    "user": user,
                    "total_unseen_message_count": total_unseen_message_count,
                    "unseen_messages": [
                        (group_chat, latest_message, count)
                        for group_chat, latest_message, count in unseen_messages
                    ],
                    "group_chats_link": urls.messages_link(),
                },
            )
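The per-user subquery above is the classic "latest row per group" shape: group by chat, take `func.max(Message.id)` as the newest message and `func.count` as the unseen tally, then join the subquery back to materialise full rows. A stripped-down sketch of just that shape, assuming the same `Message` model and an open `session`:

from sqlalchemy import func

latest = (
    session.query(
        Message.conversation_id.label("conversation_id"),
        func.max(Message.id).label("message_id"),
        func.count(Message.id).label("count_unseen"),
    )
    .group_by(Message.conversation_id)
    .subquery())

# Join back on the max id to fetch one full Message row per conversation.
rows = (session.query(Message, latest.c.count_unseen)
        .join(latest, latest.c.message_id == Message.id)
        .all())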
def userlist(page=1, pending=False, sponsored=False, rolloffs=False):
    # We need to make sure sponsors without admin/labstaff permissions don't
    # go browsing through all the users' data
    domains = g.user.get_domains()
    if ((sponsored and 'sponsor' not in domains)
            or (not sponsored and (not g.user_is_labstaff) and (not g.user_is_admin))
            or (rolloffs and (not g.user_is_labstaff) and (not g.user_is_admin))):
        flash("Unauthorized.")
        return redirect('index')

    if (pending or rolloffs) and request.method == "POST":
        form = Form(request.form)
        if not form.validate():
            flash("There was an error with your submission.")
            return redirect(request.url)
        users = [user for user, value in request.form.iteritems()
                 if value == 'approve']
        if rolloffs:
            users = [user for user, value in request.form.iteritems()]
        users = [User.username == user for user in users]
        if len(users) > 0:
            query = User.query.filter(or_(*users))
            if sponsored:
                # Filter and make sure we only get this sponsor's users, for security
                query = query.filter(User.sponsor == g.user.username)
            users = query.all()
            for user in users:
                if sponsored:
                    user.status = 'pending_labstaff'
                elif rolloffs:
                    user.status = 'pending_rolloff'
                else:
                    user.status = 'pending_create'
                db.session.add(user)
            db.session.commit()

        # here we handle denying accounts:
        users = [user for user, value in request.form.iteritems()
                 if value == 'deny']
        users = [User.username == user for user in users]
        if len(users) > 0:
            query = User.query.filter(or_(*users))
            if sponsored:
                query = query.filter(User.sponsor == g.user.username)
            users = query.all()
            for user in users:
                # send rejection emails; silently reject if comments are empty
                if sponsored:
                    mail.sponsor_reject(user)
                elif g.user_is_admin:
                    mail.admin_reject(user)
                # drop rejected users
                db.session.delete(user)
            db.session.commit()

    query = User.query
    sort = 'username'
    sort_col = User.username
    sort_dir = asc
    cols = {
        'username': User.username,
        'uid': User._uid,
        'sponsor': User.sponsor,
        'email': User.email,
        'name': User.last_name,
        'last_name': User.last_name,
        'first_name': User.first_name,
        'status': User.status,
        'grad_date': User._grad_date
    }

    # Let's make the filter form
    class FilterForm(Form):
        pass

    for field, col in cols.iteritems():
        setattr(FilterForm, field, TextField())
    filter_form = FilterForm(request.args)

    if 'sort' in request.args:
        if request.args['sort'] in cols:
            sort = request.args['sort']
            sort_col = cols[request.args['sort']]
    if 'dir' in request.args and request.args['dir'] == 'desc':
        sort_dir = desc

    if sponsored:
        query = query.filter(User.sponsor == g.user.username)
        if pending:
            query = query.filter(User.status == 'pending_sponsor')
        else:
            query = query.filter(User.status != 'pending_sponsor')
    elif rolloffs:
        now = date.today()
        query = query.filter(User._grad_date <= now)
        query = query.filter(User.status != 'pending_sponsor')
        query = query.filter(User.status != 'pending_labstaff')
        query = query.filter(User.status != 'pending_rolloff')
    else:
        if pending:
            query = query.filter(User.status == 'pending_labstaff')
        # else:
        #     query = query.filter(User.status != 'pending_labstaff')
        #     query = query.filter(User.status != 'pending_sponsor')

    for field, col in cols.iteritems():
        if field in request.args:
            if request.args[field].strip() == "":
                continue
            query = query.filter(col.like(request.args[field].strip()))

    query = query.order_by(sort_dir(sort_col))
    page = query.paginate(page)

    if pending:
        # Patch a Form. This allows us to keep our CSRF protection.
        class F(Form):
            pass

        for user in page.items:
            setattr(F, user.username,
                    RadioField(choices=[('approve', 'Approve'),
                                        ('postpone', 'Postpone'),
                                        ('deny', 'Deny')],
                               validators=[validators.Required()]))
        # Flask-WTForms likes to pull data from request.form. Force it not to.
        form = F(ImmutableMultiDict())
        # We do this after the fact so WTForms can do some of its binding
        for user in page.items:
            user.radio = getattr(form, user.username)
            user.radio.data = 'postpone'
        if sponsored:
            template = 'sponsorship_requests.html'
        else:
            template = 'list_pending_users.html'
    elif rolloffs:
        class rolloffCheckbox(Form):
            pass

        for user in page.items:
            setattr(rolloffCheckbox, user.username, BooleanField())
        form = rolloffCheckbox(ImmutableMultiDict())
        for user in page.items:
            user.checkbox = getattr(form, user.username)
        template = 'list_upcoming_rolloffs.html'
    else:
        form = Form()
        if sponsored:
            template = 'sponsored_users.html'
        else:
            template = 'userlist.html'

    return render_template(template,
                           page=page,
                           sort=sort,
                           sort_dir='asc' if sort_dir == asc else 'desc',
                           form=form,
                           filter_form=filter_form)
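Building `[User.username == user for user in users]` and splatting the list into `or_` produces one `OR` clause per name; when every predicate targets the same column, `in_` is an equivalent and usually tidier form. A sketch with hypothetical usernames:

from sqlalchemy import or_

usernames = ['alice', 'bob']  # hypothetical form submission

# The pattern used above:
query = User.query.filter(or_(*[User.username == u for u in usernames]))

# Equivalent single-column form:
query = User.query.filter(User.username.in_(usernames))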
def query_releases_json(request):
    """
    Search releases by given criteria, returning the results as JSON.

    Args:
        request (pyramid.request): The current request.
    Returns:
        dict: A dictionary with the following keys:
            releases: An iterable of the Releases that match the query.
            page: The current page.
            pages: The total number of pages.
            rows_per_page: The number of rows on a page.
            total: The number of matching results.
    """
    db = request.db
    data = request.validated
    query = db.query(Release)

    ids = data.get('ids')
    if ids is not None:
        query = query.filter(or_(*[Release.id == _id for _id in ids]))

    name = data.get('name')
    if name is not None:
        query = query.filter(Release.name.like(name))

    updates = data.get('updates')
    if updates is not None:
        query = query.join(Release.builds).join(Build.update)
        args = [Update.alias == update.alias for update in updates]
        query = query.filter(or_(*args))

    packages = data.get('packages')
    if packages is not None:
        query = query.join(Release.builds).join(Build.package)
        query = query.filter(or_(*[Package.id == p.id for p in packages]))

    exclude_archived = data.get('exclude_archived')
    if exclude_archived:
        query = query.filter(Release.state != ReleaseState.archived)

    state = data.get('state')
    if state is not None:
        query = query.filter(Release.state == ReleaseState.from_string(state))

    # We can't use ``query.count()`` here because it is naive with respect to
    # all the joins that we're doing above.
    count_query = query.with_labels().statement\
        .with_only_columns([func.count(distinct(Release.id))])\
        .order_by(None)
    total = db.execute(count_query).scalar()

    page = data.get('page')
    rows_per_page = data.get('rows_per_page')
    pages = int(math.ceil(total / float(rows_per_page)))
    query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page)

    return dict(
        releases=query.all(),
        page=page,
        pages=pages,
        rows_per_page=rows_per_page,
        total=total,
    )
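The `count_query` dance exists because `query.count()` wraps the whole statement, and the joins above can fan one Release out into many rows. Counting `DISTINCT` primary keys over the same joined statement gives the true total. A sketch of the same idea as a reusable helper, assuming SQLAlchemy 1.x where `with_only_columns` takes a list:

from sqlalchemy import func, distinct

def joined_count(db, query, pk_column):
    # Reuse the filtered/joined statement but collapse the select list to
    # a single COUNT(DISTINCT pk); drop ORDER BY since it is meaningless
    # in an aggregate.
    stmt = (query.with_labels().statement
            .with_only_columns([func.count(distinct(pk_column))])
            .order_by(None))
    return db.execute(stmt).scalar()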
def run(server: int = 1, of: int = 1,
        baseline: bool = False) -> List[Tuple[Strace, List[ScoringResult]]]:
    """Run experiment.

    Parameters
    ----------
    server : int
        Server number (one-indexed). This option is intended for distributing
        analysis among multiple servers. If it is specified, the experiment
        will start with the ith dockerfile trace. This option must be
        specified with the ``of`` option.
    of : int
        Number of servers (one-indexed). This option is intended for
        distributing analysis among multiple servers. If it is specified, the
        experiment will process every ith dockerfile trace. This option must
        be specified with the ``server`` option.
    baseline : bool
        Run the baseline experiment. The baseline is run with the same
        dataset, but does not do any preprocessing and compares straces using
        the Jaccard Coefficient scoring method with strict syscall equality.

    Returns
    -------
    List[Tuple[Strace, List[ScoringResult]]]
        List of (trace, results) pairs for each known dockerfile trace. The
        comparison results for each trace are sorted in descending order by
        score (best match first).
    """
    # Validate options.
    if server > of:
        raise ValueError(
            'Server number cannot be bigger than the number of servers.')

    logger.info('Starting Dockerfile Top 100 experiment.')

    # Get correct scoring method.
    if baseline:
        logger.info('Running as baseline.')
        compare = compare_jc
    else:
        compare = compare_nic
    logger.info(f'Using comparison method `{compare.__class__.__name__}`.')

    # Trace executables.
    manager.trace_untraced(subset=DOCKERFILE_EXECUTABLES)
    manager.parse(collectors={'untraced'})

    # Log stats and filter.
    num_executables = len(DOCKERFILE_EXECUTABLES)
    num_filtered = 100 - num_executables
    logger.info(f'\n'
                f'Dockerfile executable stats:\n'
                f'    {num_filtered} executables filtered from top 100.\n'
                f'    {num_executables} executables will be used.')
    dockerfile_executable_tuples = list(
        (exe.executable, exe.arguments) for exe in DOCKERFILE_EXECUTABLES)

    # Load all traces.
    logger.info('Loading traces...')
    traces = manager.traces(where=or_(
        t_straces.c.collector == ANSIBLE_PLAYBOOK_COLLECTOR,
        t_straces.c.collector == DOCKERFILE_COLLECTOR,
    ))
    logger.info('Done loading traces.')

    # Bin and get experiment traces.
    logger.info('Binning traces by system...')
    traces_by_system = manager.traces_by(keys=('system',),
                                         sort_keys=('collector_assigned_id',),
                                         traces=traces)
    logger.info('Done binning traces by system.')
    ansible_traces = traces_by_system['ansible']
    logger.info(f'{len(ansible_traces)} ansible traces loaded.')
    logger.info('Binning dockerfile system traces by collector...')
    dockerfile_system_traces_by_collector = manager.traces_by(
        keys=('collector',), traces=traces_by_system[DOCKERFILE_SYSTEM])
    logger.info('Done binning dockerfile system traces by collector.')
    dockerfile_traces = list(
        trace for trace
        in dockerfile_system_traces_by_collector[DOCKERFILE_COLLECTOR]
        if (trace.executable, trace.arguments) in dockerfile_executable_tuples)
    logger.info(f'{len(dockerfile_traces)} dockerfile traces loaded.')

    # Preprocess if not baseline.
    if not baseline:
        logger.info('Preprocessing...')
        for preprocessor in preprocessors:
            for trace in traces:
                preprocessor(trace, all_traces=traces)
        logger.info('Done preprocessing.')

    # Compute similarity scores.
    logger.info('Computing similarity scores...')
    dockerfile_results = []
    for dockerfile_trace in dockerfile_traces[server - 1::of]:
        # List of results just for this dockerfile trace.
        trace_results = []
        # Compare against all available ansible traces.
        for ansible_trace in ansible_traces:
            # Run comparison.
            trace_results.append(
                compare(
                    dockerfile_trace,
                    ansible_trace,
                    all_traces=traces,
                ))
        # Sort trace results in descending order by score and append to the
        # overall results.
        trace_results = sorted(trace_results, reverse=True)
        # Normalize trace result scores.
        max_trace_score = max(res.score for res in trace_results)
        for res in trace_results:
            res.normalized_score = res.score / max_trace_score
        # Append final results.
        dockerfile_results.append((dockerfile_trace, trace_results))
    logger.info('Done computing similarity scores.')

    # Return final results for all dockerfile traces.
    return dockerfile_results
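The `[server - 1::of]` slice is what shards the workload: server i of n takes every nth trace starting at offset i-1, so the servers cover the list disjointly. A tiny illustration with stand-in data:

traces = list(range(10))  # stand-in for dockerfile_traces

# Three servers split the list without overlap:
assert traces[0::3] == [0, 3, 6, 9]  # server=1, of=3
assert traces[1::3] == [1, 4, 7]     # server=2, of=3
assert traces[2::3] == [2, 5, 8]     # server=3, of=3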
def query_updates(request):
    """
    Search updates by given criteria.

    Args:
        request (pyramid.request): The current request.
    Returns:
        dict: A dictionary with at least the following key mappings:
            updates: An iterable of the updates that match the query.
            page: The current page.
            pages: The total number of pages.
            rows_per_page: How many results are on the page.
            total: The total number of updates matching the query.
            package: The package corresponding to the first update found in
                the search.
    """
    db = request.db
    data = request.validated
    query = db.query(Update)

    approved_since = data.get('approved_since')
    if approved_since is not None:
        query = query.filter(Update.date_approved >= approved_since)

    approved_before = data.get('approved_before')
    if approved_before is not None:
        query = query.filter(Update.date_approved < approved_before)

    bugs = data.get('bugs')
    if bugs is not None:
        query = query.join(Update.bugs)
        query = query.filter(or_(*[Bug.bug_id == bug_id for bug_id in bugs]))

    critpath = data.get('critpath')
    if critpath is not None:
        query = query.filter(Update.critpath == critpath)

    like = data.get('like')
    if like is not None:
        query = query.join(Update.builds)
        query = query.filter(Build.nvr.like('%%%s%%' % like))

    search = data.get('search')
    if search is not None:
        query = query.join(Update.builds)
        query = query.filter(
            or_(Build.nvr.ilike('%%%s%%' % search),
                Update.alias.ilike('%%%s%%' % search)))

    locked = data.get('locked')
    if locked is not None:
        query = query.filter(Update.locked == locked)

    modified_since = data.get('modified_since')
    if modified_since is not None:
        query = query.filter(Update.date_modified >= modified_since)

    modified_before = data.get('modified_before')
    if modified_before is not None:
        query = query.filter(Update.date_modified < modified_before)

    packages = data.get('packages')
    if packages is not None:
        query = query.join(Update.builds).join(Build.package)
        query = query.filter(or_(*[Package.name == pkg for pkg in packages]))

    package = None
    if packages and len(packages):
        package = packages[0]

    builds = data.get('builds')
    if builds is not None:
        query = query.join(Update.builds)
        query = query.filter(or_(*[Build.nvr == build for build in builds]))

    pushed = data.get('pushed')
    if pushed is not None:
        query = query.filter(Update.pushed == pushed)

    pushed_since = data.get('pushed_since')
    if pushed_since is not None:
        query = query.filter(Update.date_pushed >= pushed_since)

    pushed_before = data.get('pushed_before')
    if pushed_before is not None:
        query = query.filter(Update.date_pushed < pushed_before)

    releases = data.get('releases')
    if releases is not None:
        query = query.filter(or_(*[Update.release == r for r in releases]))

    # This singular version of the plural "releases" is purely for bodhi1
    # backwards compat (mostly for RSS feeds) - threebean
    release = data.get('release')
    if release is not None:
        query = query.filter(Update.release == release)

    req = data.get('request')
    if req is not None:
        query = query.filter(Update.request == req)

    severity = data.get('severity')
    if severity is not None:
        query = query.filter(Update.severity == severity)

    status = data.get('status')
    if status is not None:
        query = query.filter(or_(*[Update.status == s for s in status]))

    submitted_since = data.get('submitted_since')
    if submitted_since is not None:
        query = query.filter(Update.date_submitted >= submitted_since)

    submitted_before = data.get('submitted_before')
    if submitted_before is not None:
        query = query.filter(Update.date_submitted < submitted_before)

    suggest = data.get('suggest')
    if suggest is not None:
        query = query.filter(Update.suggest == suggest)

    type = data.get('type')
    if type is not None:
        query = query.filter(Update.type == type)

    content_type = data.get('content_type')
    if content_type is not None:
        query = query.join(Update.builds)
        query = query.filter(Build.type == content_type)

    gating_status = data.get('gating')
    if gating_status is not None:
        query = query.filter(Update.test_gating_status == gating_status)

    user = data.get('user')
    if user is not None:
        query = query.filter(or_(*[Update.user == u for u in user]))

    updateid = data.get('updateid')
    if updateid is not None:
        query = query.filter(or_(*[Update.alias == uid for uid in updateid]))

    alias = data.get('alias')
    if alias is not None:
        query = query.filter(or_(*[Update.alias == a for a in alias]))

    from_side_tag = data.get('from_side_tag')
    if from_side_tag is not None:
        if from_side_tag:
            query = query.filter(Update.from_tag.isnot(None))
        else:
            query = query.filter(Update.from_tag.is_(None))

    query = query.order_by(Update.date_submitted.desc())

    # We can't use ``query.count()`` here because it is naive with respect to
    # all the joins that we're doing above.
    count_query = query.with_labels().statement\
        .with_only_columns([func.count(distinct(Update.id))])\
        .order_by(None)
    total = db.execute(count_query).scalar()

    page = data.get('page')
    rows_per_page = data.get('rows_per_page')
    pages = int(math.ceil(total / float(rows_per_page)))
    query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page)

    return_values = dict(
        updates=query.all(),
        page=page,
        pages=pages,
        rows_per_page=rows_per_page,
        total=total,
        chrome=data.get('chrome'),
        display_user=data.get('display_user', False),
        display_request=data.get('display_request', True),
        package=package,
    )
    # we need some extra information for the searching / filtering interface
    # when rendering the html, so we add this here.
    if request.accept.accept_html():
        return_values.update(
            gating_statuses=sorted(
                list(bodhi.server.models.TestGatingStatus.values())),
            types=list(bodhi.server.models.UpdateType.values()),
            severities=sorted(list(bodhi.server.models.UpdateSeverity.values()),
                              key=bodhi.server.util.sort_severity),
            statuses=list(bodhi.server.models.UpdateStatus.values()),
            releases=Release.all_releases(),
        )
    return return_values
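Both Bodhi queries close with the same pagination arithmetic: the page count is the ceiling of `total / rows_per_page`, and the offset for a one-indexed page is `rows_per_page * (page - 1)`. A worked illustration with made-up numbers:

import math

total, rows_per_page = 53, 20
pages = int(math.ceil(total / float(rows_per_page)))
assert pages == 3

# One-indexed page 3 skips the first two pages of rows.
page = 3
offset = rows_per_page * (page - 1)
assert offset == 40  # rows 41..53 remain on the final page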