def __get_subquery(self, *args, ord_by=None):
    def add_joined_search(field_name):
        joined = db(Search.index, func.min(Search.text).label('text'),
                    func.min(Search.table_name).label('table_name'),
                    index=subquery_search.subquery().c.index).filter(
            Search.kind.in_(tuple(field_name))).group_by(Search.index)
        return joined

    subquery_search = db(Search.index.label('index'),
                         func.sum(Search.relevance).label('relevance'),
                         func.min(Search.table_name).label('table_name'),
                         func.min(Search.md_tm).label('md_tm'),
                         func.max(Search.position).label('position'),
                         func.max(Search.text).label('text')).filter(
        or_(*self.__get_search_params(*args))).group_by('index')

    if type(ord_by) in (str, list, tuple):
        order = self.__get_order('text', 'text')
        subquery_search = add_joined_search(ord_by)
    elif type(ord_by) == int:
        ord_to_str = self.__order_by_to_str[ord_by]
        order = self.__get_order(ord_to_str, ord_to_str)
    else:
        order = self.__get_order('relevance', 'relevance')

    if 'md_tm' in str(order):
        subquery_search = subquery_search.order_by(order)
    else:
        subquery_search = subquery_search.order_by(order).order_by(
            self.__get_order('md_tm', 'md_tm'))
    return subquery_search
def annotation_objects_in_frame(self, frame):
    """
    Returns annotation objects related to this video that are visible in given time.
    AnnotationValues are lazily-loaded, in comparison to annotation_objects_in_frame_intervals()

    SQL:
        SELECT annotation_objects....., min(annotation_values.frame_from) AS min_1, max(annotation_values.frame_from) AS max_1
        FROM annotation_objects
        INNER JOIN annotation_values ON annotation_objects.id = annotation_values.annotation_object_id
        WHERE annotation_objects.video_id = %s
        GROUP BY annotation_objects.id
        HAVING min(annotation_values.frame_from) <= %s AND max(annotation_values.frame_from) >= %s
        ORDER BY min(annotation_values.frame_from), max(annotation_values.frame_from), annotation_objects.id

    :rtype: list of (AnnotationObject, int, int)
    """
    q = database.db.session.query(AnnotationObject, func.min(AnnotationValue.frame_from),
                                  func.max(AnnotationValue.frame_from))
    q = q.filter_by(video_id=self.id)
    q = q.join(AnnotationObject.annotation_values)
    q = q.group_by(AnnotationObject.id)
    q = q.having((func.min(AnnotationValue.frame_from) <= frame)
                 & (func.max(AnnotationValue.frame_from) >= frame))
    q = q.order_by(func.min(AnnotationValue.frame_from), func.max(AnnotationValue.frame_from),
                   AnnotationObject.id)
    q = q.all()
    return q
def __init__(self, engine=None, start_date='1925-12-31', end_date='',
             limit=None, all_vars=None, **kwargs):
    super(CCMNamesQuery, self).__init__(engine, limit)
    logging.info("---- Creating a CCM-MSENAMES query session. ----")
    msenames = self.tables['msenames']
    ccmxpf_linktable = self.tables['ccmxpf_linktable']
    id_vars = [msenames.c.permno, msenames.c.permco,
               ccmxpf_linktable.c.gvkey, msenames.c.comnam]
    query = sa.select(id_vars +
                      [func.min(msenames.c.namedt).label('sdate'),
                       func.max(msenames.c.nameendt).label('edate')],
                      group_by=id_vars,
                      order_by=id_vars,
                      limit=self.limit).\
        where(ccmxpf_linktable.c.linktype.startswith('L')).\
        where(ccmxpf_linktable.c.linkprim.in_(['P', 'C'])).\
        where(ccmxpf_linktable.c.usedflag == 1).\
        where((ccmxpf_linktable.c.linkdt <= msenames.c.namedt) |
              (ccmxpf_linktable.c.linkdt == None)).\
        where((msenames.c.nameendt <= ccmxpf_linktable.c.linkenddt) |
              (ccmxpf_linktable.c.linkenddt == None)).\
        where(msenames.c.permno == ccmxpf_linktable.c.lpermno).\
        where(msenames.c.permco == ccmxpf_linktable.c.lpermco)
    if start_date:
        query = query.having(func.min(msenames.c.namedt) >= start_date)
    if end_date:
        query = query.having(func.max(msenames.c.nameendt) <= end_date)
    logging.debug(query)
    self.query = query
def get(self):
    args = self.parse_arg()
    releases = db.session.query(
        func.count(Release.value).label('count'),
        func.min(Release.activities).label('activities'),
        func.sum(Release.value).label('total_value'),
        Buyer.name.label('buyer'),
        func.min(Buyer.slug).label('buyer_slug'))
    releases = self.filter_request(releases, args)
    releases = releases.filter(Buyer.id == Release.buyer_id)
    releases = releases.group_by(Buyer.name)
    releases = self.sort_request(releases, args)
    release_count = releases.count()
    (releases, offset, limit) = self.offset_limit(releases, args)

    # Generate output structure
    output = dict()
    output["meta"] = {
        "count": release_count,
        "pagination": {"offset": offset, "limit": limit}
    }
    output["releases"] = [r._asdict() for r in releases]
    return output
def listSessions(request):
    ssnTmCnt = request.session.query(Session)\
        .add_column(func.min(Pagerecording.time))\
        .add_column(func.count(Pagerecording.id))\
        .join(Pagerecording).group_by(Session)\
        .order_by(func.min(Pagerecording.time).desc()).all()
    return render_to_response('sessionlist.mako', {'ssnTmCnt': ssnTmCnt}, request=request)
def backup_duration(bddate):
    s = select([pool.c.name, func.min(job.c.starttime), func.max(job.c.endtime)],
               use_labels=True).where(
        and_(job.c.poolid == pool.c.poolid,
             cast(job.c.schedtime, Date) <= datetime.fromtimestamp(float(bddate)),
             cast(job.c.schedtime, Date) >= datetime.fromtimestamp(float(bddate)) - timedelta(days=1))
    ).group_by(pool.c.name, job.c.schedtime)
    bd = db.execute(s).fetchall()
    bd_result = {}
    for bpool in bd:
        bd_result.update({bpool[0]: {'start': bpool[1], 'end': bpool[2]}})

    s = select([func.min(job.c.starttime), func.max(job.c.endtime)],
               use_labels=True).where(job.c.poolid == pool.c.poolid)
    _min_date, _max_date = db.execute(s).fetchone()
    min_date = int(mktime((strptime(str(_min_date), "%Y-%m-%d %H:%M:%S"))))
    max_date = int(mktime((strptime(str(_max_date), "%Y-%m-%d %H:%M:%S"))))
    return render_template('backup_duration.html', title="Backup duration time",
                           bd_result=bd_result, bddate=int(bddate),
                           min_date=min_date, max_date=max_date)
def add_joined_search(field_name):
    joined = (
        db(
            Search.index,
            func.min(Search.text).label("text"),
            func.min(Search.table_name).label("table_name"),
            index=subquery_search.subquery().c.index,
        )
        .filter(Search.kind.in_(tuple(field_name)))
        .group_by(Search.index)
    )
    return joined
def get_order(order_name, desc_asc, field):
    order_name += "+" if desc_asc == "desc" else "-"
    result = {
        "text+": lambda field_name: desc(func.max(getattr(Search, field_name, Search.text))),
        "text-": lambda field_name: asc(func.max(getattr(Search, field_name, Search.text))),
        "md_tm+": lambda field_name: desc(func.min(getattr(Search, field_name, Search.md_tm))),
        "md_tm-": lambda field_name: asc(func.min(getattr(Search, field_name, Search.md_tm))),
        "relevance+": lambda field_name: desc(func.sum(getattr(Search, field_name, Search.relevance))),
        "relevance-": lambda field_name: asc(func.sum(getattr(Search, field_name, Search.relevance))),
        "position+": lambda field_name: desc(func.max(getattr(Search, field_name, Search.position))),
        "position-": lambda field_name: asc(func.max(getattr(Search, field_name, Search.position))),
    }[order_name](field)
    return result
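# A hypothetical usage sketch (not part of the original code) showing how the ordering
# expression returned by get_order() is meant to be applied: because it wraps the column
# in an aggregate (func.max/func.min/func.sum), it belongs on a grouped query.
# `db`, `Search`, and the column names here are assumptions carried over from the snippet above.
def search_ordered_by_relevance():
    order_clause = get_order("relevance", "desc", "relevance")
    return (db.session.query(Search.index,
                             func.sum(Search.relevance).label("relevance"))
            .group_by(Search.index)
            .order_by(order_clause)
            .all())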
def analytics_closer():
    user1 = db.session.query(User).get(int(request.args.get("user1")))
    user2 = db.session.query(User).get(int(request.args.get("user2")))

    if request.args.get("criterion") == "artist":
        field = Scrobble.artist
    if request.args.get("criterion") == "track":
        field = func.concat(Scrobble.artist, Scrobble.track)

    # Compare scalar timestamps, not Query objects: the common starting point is the
    # later of the two users' first scrobbles.
    start_uts = max(
        db.session.query(func.min(Scrobble.uts)).filter_by(user=user1).scalar(),
        db.session.query(func.min(Scrobble.uts)).filter_by(user=user2).scalar()
    )

    def gather_shares(user):
        data = {}
        for (share, uts) in db.session.query(field, Scrobble.uts).filter(Scrobble.user == user,
                                                                         Scrobble.uts >= start_uts):
            week = int(math.floor(uts / (86400 * 7)) * (86400 * 7))
            if week not in data:
                data[week] = set()
            if share not in data[week]:
                data[week].add(share)
        return data

    user1_shares = gather_shares(user1)
    user2_shares = gather_shares(user2)

    if request.args.get("criterion_type") == "integral":
        def integrate_shares(shares):
            prev_week = None
            for week in sorted(shares.keys()):
                if prev_week:
                    shares[week] = set.union(shares[week], shares[prev_week])
                prev_week = week
            return shares

        user1_shares = integrate_shares(user1_shares)
        user2_shares = integrate_shares(user2_shares)

    data = [
        [
            date.fromtimestamp(week).strftime("%b %Y"),
            len(user1_shares[week] - user2_shares[week]) / float(len(user1_shares[week])),
            "",
            ", ".join(sorted(user1_shares[week] - user2_shares[week])),
            -len(user2_shares[week] - user1_shares[week]) / float(len(user2_shares[week])),
            "",
            ", ".join(sorted(user2_shares[week] - user1_shares[week])),
        ]
        for week in sorted(set.intersection(set(user1_shares.keys()), set(user2_shares.keys())))
    ]

    return dict(user1=user1, user2=user2, data=json.dumps(data))
def annotation_objects_all(self):
    """
    Returns all annotation objects related to this video, with each object's first and last frame.
    Annotation objects are sorted by the time of their first occurrence in the video.
    :rtype: list of (AnnotationObject, int, int)
    """
    q = database.db.session.query(AnnotationObject, func.min(AnnotationValue.frame_from),
                                  func.max(AnnotationValue.frame_from))
    q = q.filter_by(video_id=self.id)
    q = q.join(AnnotationObject.annotation_values)
    q = q.group_by(AnnotationObject.id)
    q = q.order_by(func.min(AnnotationValue.frame_from), func.max(AnnotationValue.frame_from),
                   AnnotationObject.id)
    return q.all()
def find_day2scrobbles(user, artist):
    day2scrobbles = OrderedDict([(day, 0) for day in range(
        int(db.session.query(func.coalesce(func.min(Scrobble.uts), 0)).
            filter(Scrobble.user == user, Scrobble.artist == artist).
            scalar() / 86400),
        int(db.session.query(func.coalesce(func.max(Scrobble.uts), 0)).
            filter(Scrobble.user == user, Scrobble.artist == artist).
            scalar() / 86400) + 1)])

    for uts, in db.session.query(Scrobble.uts).\
            filter(Scrobble.user == user, Scrobble.artist == artist):
        day2scrobbles[int(uts / 86400)] += 1

    # Treat days with fewer than 4 scrobbles as silence.
    for day in day2scrobbles:
        if day2scrobbles[day] < 4:
            day2scrobbles[day] = 0

    # Trim leading and trailing silent days; iterate over a copy of the keys
    # because entries are deleted while walking the dict.
    for day in list(day2scrobbles):
        if day2scrobbles[day] != 0:
            break
        del day2scrobbles[day]
    for day in reversed(list(day2scrobbles)):
        if day2scrobbles[day] != 0:
            break
        del day2scrobbles[day]

    return day2scrobbles
def statArtistReleaseCount(self):
    # .scalar() so the average is a plain value, matching the min/max queries below
    artistAverageCount = self.dbSession.query(func.avg(Artist.releases)).scalar()
    artistMinimumCount = self.dbSession.query(func.min(Artist.releases)).scalar()
    artistMaximumCount = self.dbSession.query(func.max(Artist.releases)).scalar()
    artistMaximumId = ''
    artistMaxName = ''
    return {
        'title': 'Artist Release Count',
        'class': 'fa-user',
        'average': {
            'type': 'string',
            'value': artistAverageCount,
            'detail': {
                'text': ''
            }
        },
        'minimum': {
            'type': 'string',
            'value': artistMinimumCount,
            'detail': {
                'text': 'Many'
            }
        },
        'maximum': {
            'type': 'artist',
            'value': artistMaximumCount,
            'detail': {
                'id': artistMaximumId,
                'thumbnailUrl': '/images/artist/thumbnail/' + artistMaximumId,
                'detailUrl': '/artist/' + artistMaximumId,
                'text': artistMaxName
            }
        }
    }
def show_stats():
    joint = query.for_field(Transaction.account).filter_by(person='')
    timespan = query.all() \
        .add_columns(func.max(Transaction.date).label('maxDate')) \
        .add_columns(func.min(Transaction.date).label('minDate'))
    datespan = timespan[0][0] - timespan[0][1]

    merchants = query.group_format(Transaction.merchant)
    top_merchants = OrderedDict()
    for key in merchants.keys()[0:20]:
        top_merchants[key] = merchants[key]

    amount_data = [
        make_template_data(query.group_format(Transaction.person), "per person"),
        make_template_data(query.group_format(Transaction.account, joint), "joint transactions by account"),
        make_template_data(query.group_format(Transaction.category), "all transactions by category"),
        make_template_data(top_merchants, "top 20 merchants")
    ]

    return flask.render_template('stats.html',
                                 datespan=datespan,
                                 number_of_days=datespan.total_seconds() / (60*60*24),
                                 number_of_months=datespan.total_seconds() / (60*60*24*30),
                                 amount_data=amount_data)
def associate_activities(user, before=None, after=None):
    assert user.has_strava()

    moves_by_date_time = {}
    for id, date_time in db.session.query(Sample.move_id, func.min(Sample.utc)) \
            .join(Move) \
            .filter(Sample.utc != None) \
            .filter(Move.user == user) \
            .group_by(Sample.move_id):
        utc = date_time.replace(tzinfo=pytz.UTC)
        moves_by_date_time[utc] = id

    moves_by_strava_activity_id = {}
    for id, strava_activity_id in db.session.query(Move.id, Move.strava_activity_id) \
            .filter(Move.user == user) \
            .filter(Move.strava_activity_id != None):
        moves_by_strava_activity_id[strava_activity_id] = id

    new_strava_activities = []
    associated_strava_activities = []
    known_strava_activities = []

    client = get_strava_client(user)
    for activity in client.get_activities(before=before, after=after):
        move_id = None
        start_date = activity.start_date
        if activity.id in moves_by_strava_activity_id:
            move_id = moves_by_strava_activity_id[activity.id]
        elif start_date in moves_by_date_time:
            move_id = moves_by_date_time[start_date]
        else:
            for date_time in moves_by_date_time.keys():
                start_date_delta = abs(date_time - start_date)
                start_date_local_delta = abs(date_time - activity.start_date_local.replace(tzinfo=pytz.UTC))
                max_delta = timedelta(seconds=30)
                if start_date_delta <= max_delta or start_date_local_delta <= max_delta:
                    move_id = moves_by_date_time[date_time]
                    break

        if not move_id:
            potential_moves = []
            for date_time in moves_by_date_time.keys():
                start_date_delta = abs(date_time - start_date)
                if -MAX_DATE_TIME_OFFSET <= start_date_delta <= MAX_DATE_TIME_OFFSET:
                    potential_moves.append(moves_by_date_time[date_time])
            if len(potential_moves) == 1:
                move_id = potential_moves[0]
            elif len(potential_moves) > 1:
                # too many candidates found
                pass

        if not move_id:
            new_strava_activities.append(activity)
        elif activity.id in moves_by_strava_activity_id:
            known_strava_activities.append(activity)
        else:
            move = Move.query.filter_by(id=move_id).one()
            move.strava_activity_id = activity.id
            db.session.commit()
            associated_strava_activities.append((activity, move))

    return associated_strava_activities, known_strava_activities, new_strava_activities
def get_min_id(self):
    rows = self.conn.execute(select([func.min(message.c.yammer_id)]))
    result = rows.fetchone()
    if result:
        return result[0]
    else:
        return None
def tweet_anniversaries():
    now = datetime.now()
    now_uts = time.mktime(now.timetuple())
    builders = {True: PositiveAnniversaryBuilder(), False: NegativeAnniversaryBuilder()}
    for user in db.session.query(User).\
            filter(User.download_scrobbles == True,
                   User.twitter_username != None,
                   User.twitter_track_artist_anniversaries == True):
        max_possible_milestone = int(math.floor((now_uts -
                                                 db.session.query(func.min(Scrobble.uts)).
                                                 filter(Scrobble.user == user).
                                                 scalar()) / (365 * 86400)))
        for positive, builder in builders.iteritems():
            for user_artist in builder.query(now, user, max_possible_milestone):
                anniversary = builder.anniversary_for(user_artist, now_uts)
                if db.session.query(Anniversary).\
                        filter(Anniversary.user == user,
                               Anniversary.artist == user_artist.artist,
                               Anniversary.anniversary == anniversary,
                               Anniversary.positive == positive).\
                        first() is None:
                    a = Anniversary()
                    a.user = user
                    a.artist = user_artist.artist
                    a.anniversary = anniversary
                    a.positive = positive
                    db.session.add(a)

                    post_tweet(user, builder.anniversary_tweet(user_artist, anniversary))

    db.session.commit()
def stats(self, survey_id):
    """Get stats for a survey."""
    result = (
        self.session
        .query(
            func.max(Survey.created_on),
            func.min(Submission.save_time),
            func.max(Submission.save_time),
            func.count(Submission.id),
        )
        .select_from(Submission)
        .join(Survey)
        # TODO: ask @jmwohl what this line is supposed to do
        # .filter(User.id == self.current_user_model.id)
        .filter(Submission.survey_id == survey_id)
        .one()
    )
    response = {
        "created_on": result[0],
        "earliest_submission_time": result[1],
        "latest_submission_time": result[2],
        "num_submissions": result[3]
    }
    return response
def get(self):
    args = self.parse_arg()
    releases = db.session.query(
        Release.procuring_entity.label('procuring_entity'),
        func.min(Release.procuring_entity_slug).label('procuring_entity_slug'),
        func.sum(Release.value).label('total_value'),
        func.count(Release.value).label('count'))
    releases = self.filter_request(releases, args)
    releases = releases.group_by(Release.procuring_entity)
    releases = self.sort_request(releases, args)
    release_count = releases.count()
    (releases, offset, limit) = self.offset_limit(releases, args)

    # Generate output structure
    output = dict()
    output["meta"] = {
        "count": release_count,
        "pagination": {"offset": offset, "limit": limit}
    }
    output["releases"] = [r._asdict() for r in releases]
    return output
def findMinAndMax(name, field):
    min = {"name": "Min", "value": db.session.query(func.min(field)).first()[0]}
    max = {"name": "Max", "value": db.session.query(func.max(field)).first()[0]}
    array = {"name": name, "values": [min, max]}
    return array
def get(self, bucket_id):
    b = Bucket.query.filter_by(id=bucket_id).first()
    if b is None:
        return {'status': 'error', 'description': 'Bucket ' + bucket_id + ' does not exist.'}, 204
    u = User.query.filter_by(id=b.user_id).first()
    if u.id != g.user.id and b.private != '0':
        return {'status': 'error', 'description': 'Private Bucket'}, 401

    result = db.session.query(Post.date).filter(Post.bucket_id == bucket_id).distinct(Post.date).all()
    data = {}
    if len(result) == 0:
        return {'status': 'error', 'description': 'No rows returned'}, 204
    else:
        dateList = []
        for i in range(len(result)):
            dateList.append(result[i][0])
        data['count'] = len(result)
        data['minDate'] = db.session.query(func.min(Post.date).label("min_date")).filter(
            Post.bucket_id == bucket_id).first().min_date
        data['maxDate'] = db.session.query(func.max(Post.date).label("max_date")).filter(
            Post.bucket_id == bucket_id).first().max_date
        data['dateList'] = dateList
        return {'status': 'success', 'description': 'Data successfully returned.', 'data': data}, 200
def daily_base_query():
    dt = date_col()
    date = func.date(dt)
    dm = func.min(date).label("date")
    # base query, daily means
    query_by_day = select([dm]).group_by(date)
    query_by_day = query_by_day.order_by(dt)
    return query_by_day, dt
def inject_archives():
    """TODO: inject archive list into template context"""
    min_max_pubdates = db_session.query(
        func.min(Post.pubdate), func.max(Post.pubdate)).filter(filter_public())
    # min_year = min_max_pubdates[0].year()
    # max_year = min_max_pubdates[1].year()
    # archives = []
    return dict(min_max_pubdates=min_max_pubdates)
def collect_table_data(base, session):
    """
    Read table information from database and return dict:

    ```
    {
        "table_name_1": {
            "row_count": 100,
            "columns": {
                "col_name_1": {
                    "type": "VARCHAR",
                    "primary_key": true,
                    "min_value": "aaa",
                    "max_value": "zzz"
                }
            }
        },
        ...
    }
    ```

    :param base: base
    :param session: session
    :return: dict
    """
    all_tables = {}
    for table_class in base.classes:
        table_obj = table_class.__table__
        debug_message('--', table_obj.name)
        all_tables[table_obj.name] = {
            'row_count': session.query(table_obj).count(),
            'columns': {}
        }
        for table_col in table_obj.columns:
            col_dict = {
                'type': table_col.type.__visit_name__,
                'primary_key': table_col.primary_key,
            }
            if table_col.primary_key:
                minv = session.query(func.min(table_col)).first()[0]
                maxv = session.query(func.max(table_col)).first()[0]
                if isinstance(table_col.type, DateTime):
                    minv = str(minv)
                    maxv = str(maxv)
                col_dict['min_value'] = minv
                col_dict['max_value'] = maxv
            all_tables[table_obj.name]['columns'][table_col.name] = col_dict
            debug_message('  ', table_col.name, table_col.type)
    debug_message('Processed {0} tables.'.format(len(all_tables)))
    return all_tables
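# A minimal usage sketch for collect_table_data(), not from the original code and assuming
# SQLAlchemy 1.4+. automap_base() reflects the schema into mapped classes, which is the
# shape of `base` the function above iterates over via base.classes (only tables with
# primary keys are mapped). The database URL is a placeholder.
from sqlalchemy import create_engine
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session

def dump_table_stats():
    engine = create_engine("sqlite:///example.db")  # assumed location
    base = automap_base()
    base.prepare(autoload_with=engine)
    with Session(engine) as session:
        for name, info in collect_table_data(base, session).items():
            print(name, info['row_count'])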
def generate_existing_player(cls):
    qry = db.session.query(func.min(cls.id).label('min_id'),
                           func.max(cls.id).label('max_id'))
    res = qry.one()
    while True:
        seq = list(range(res.min_id, res.max_id + 1))
        random.shuffle(seq)
        for p_id in seq:
            yield cls.query.get(p_id)
def oldest_listening_uts(self):
    """
    Retrieves the UTC Unix Timestamp corresponding to the oldest
    listened-to track in the database.

    :returns: an int UTC Unix Timestamp
    """
    query = self.session.query(func.min(Track.listening_uts).label("min_uts"),)
    self.session.close()
    return query.one().min_uts
def _get_start_end_dates(self):
    '''find the min & max date using Trip & UniversalCalendar'''
    if not self.is_cached_data_valid('_start_date'):
        from gtfsdb.model.calendar import UniversalCalendar
        q = self.session.query(func.min(UniversalCalendar.date), func.max(UniversalCalendar.date))
        q = q.filter(UniversalCalendar.trips.any(route_id=self.route_id))
        self._start_date, self._end_date = q.one()
        self.update_cached_data('_start_date')
    return self._start_date, self._end_date
def load_old_data():
    """
    Gets old data for each tag and updates the database.
    If the DB contains data from 01/01/2015, the execution of this method
    will load the statistics from 31/12/2014.
    """
    qry = db_session.query(func.max(Question.date).label("max_date"),
                           func.min(Question.date).label("min_date"))
    maxx, minn = qry.all()[0]
    day_before = minn - datetime.timedelta(days=1)
    update_questions(day_before)
def __get_order(self, order_name, field):
    order_name += '+' if self.__desc_asc == 'desc' else '-'
    result = {
        'text+': lambda field_name: desc(func.max(getattr(Search, field_name, Search.text))),
        'text-': lambda field_name: asc(func.max(getattr(Search, field_name, Search.text))),
        'md_tm+': lambda field_name: desc(func.min(getattr(Search, field_name, Search.md_tm))),
        'md_tm-': lambda field_name: asc(func.min(getattr(Search, field_name, Search.md_tm))),
        'relevance+': lambda field_name: desc(func.sum(getattr(Search, field_name, Search.relevance))),
        'relevance-': lambda field_name: asc(func.sum(getattr(Search, field_name, Search.relevance))),
        'position+': lambda field_name: desc(func.max(getattr(Search, field_name, Search.position))),
        'position-': lambda field_name: asc(func.max(getattr(Search, field_name, Search.position)))
    }[order_name](field)
    return result
def campaign_date_calls(campaign_id):
    start = request.values.get('start')
    end = request.values.get('end')
    timespan = request.values.get('timespan', 'day')

    if timespan not in API_TIMESPANS.keys():
        abort(400, 'timespan should be one of %s' % ','.join(API_TIMESPANS))
    else:
        timespan_strf, timespan_to_char = API_TIMESPANS[timespan]

    campaign = Campaign.query.filter_by(id=campaign_id).first_or_404()
    timestamp_to_char = func.to_char(Call.timestamp, timespan_to_char).label(timespan)

    query = (
        db.session.query(
            func.min(Call.timestamp.label('date')),
            timestamp_to_char,
            Call.status,
            func.count(distinct(Call.id)).label('calls_count')
        )
        .filter(Call.campaign_id == int(campaign.id))
        .group_by(timestamp_to_char)
        .order_by(timespan)
        .group_by(Call.status)
    )

    if start:
        try:
            startDate = dateutil.parser.parse(start)
        except ValueError:
            abort(400, 'start should be in isostring format')
        query = query.filter(Call.timestamp >= startDate)

    if end:
        try:
            endDate = dateutil.parser.parse(end)
            if start:
                if endDate < startDate:
                    abort(400, 'end should be after start')
                if endDate == startDate:
                    endDate = startDate + timedelta(days=1)
        except ValueError:
            abort(400, 'end should be in isostring format')
        query = query.filter(Call.timestamp <= endDate)

    dates = defaultdict(dict)
    for (date, timespan, call_status, count) in query.all():
        # combine status values by date
        for status in TWILIO_CALL_STATUS:
            if call_status == status:
                date_string = date.strftime(timespan_strf)
                dates[date_string][status] = count

    sorted_dates = OrderedDict(sorted(dates.items()))
    return jsonify({'objects': sorted_dates})
def addAbandonedMatch(p1Id: int, p2Id: int, n_maps: int, match_name: str) -> Match:
    minMatchId = min(
        db.session.query(func.min(Match.id).label('min_id')).one().min_id, 0) - 1

    # Check if player is already registered else create a new object
    p1 = Player.query.get(p1Id)
    if p1 is None:
        p1 = getPlayerObj(p1Id)
    p2 = Player.query.get(p2Id)
    if p2 is None:
        p2 = getPlayerObj(p2Id)

    match = Match(id=minMatchId,
                  name=match_name,
                  start_time=datetime.now(),
                  end_time=datetime.now(),
                  players=[p1, p2],
                  games=[])
    db.session.add(match)

    eloDiff = min(abs(p1.elo - p2.elo), 400)
    delR = defaultdict(lambda: 0)
    for _ in range(n_maps):
        for player in [p1, p2]:
            if player == p1:
                points = 1
                opponentElo = p2.elo
            else:
                points = 0
                opponentElo = p1.elo
            row = EloDiff.query.filter(EloDiff.ll <= eloDiff, EloDiff.ul >= eloDiff).one()
            if player.elo >= opponentElo:
                pd = row.high
            else:
                pd = row.low
            logger.debug(
                f'score.points : {points}, pd : {pd}, change : {points - pd}')
            delR[player.id] += (points - pd)
            logger.debug(
                f'player elo = {player.elo}, opponent elo = {opponentElo}')

    logger.debug(delR)
    processDelR(delR, match.id)
    return match
def get_years(model):
    if model == "distinct":
        q = DB.session.query(
            label("year", distinct(func.date_part("year", VSynthese.date_min)))
        ).order_by("year")
    if model == "min-max":
        q = DB.session.query(
            func.min(func.date_part("year", VSynthese.date_min)),
            func.max(func.date_part("year", VSynthese.date_min)),
        )
    return q.all()
def temp_ranges(start, end_date):
    """Return the json list of min, average and max temperature for a given date range"""
    min_temp = session.query(func.min(Measurements.tobs)).\
        filter(Measurements.date.between(start, end_date)).first()
    avg_temp = session.query(func.avg(Measurements.tobs)).\
        filter(Measurements.date.between(start, end_date)).first()
    max_temp = session.query(func.max(Measurements.tobs)).\
        filter(Measurements.date.between(start, end_date)).first()

    tobs_data = [min_temp, avg_temp, max_temp]
    return jsonify(tobs_data)
def _get_artifact_creation_date(artifact_id, session):
    """Returns the date of the first touch recorded against an artifact.

    :param artifact_id: A valid artifact id.
    :returns: timestamp of first touch (str)
    """
    change_dt = (session
                 .query(func.min(Touch.touch_dt))
                 .filter(Touch.artifact_id == artifact_id)
                 .first())
    return change_dt
def index():
    """
    render the home/index page

    notice the prequery here for the absolute max/min ages:
    these are passed to the template for use in the slider control
    """
    mx = db.session.query(func.max(Artist.age)).scalar()
    mn = db.session.query(func.min(Artist.age)).scalar()
    return render_template('index.html', mx=mx, mn=mn)
def random_data():
    rng = settings.Session.query(func.min(Comment163.id), func.max(Comment163.id)).all()[0]
    data = {}
    for i in range(12):
        v = random.uniform(rng[0], rng[1])
        d = settings.engine.execute(
            "select txt,liked,a.author,song_name,a.song_id,b.author from comment163 a inner join music163 b on a.song_id= b.song_id where a.id>"
            + str(v) + " limit 1").fetchone()
        data[d[3]] = [d[0], str(d[1]), d[2], str(d[4]), d[5]]
    return data
def test_lowest_left_is_always_1(self):
    """ The lowest left key is always 1.

    The following example should return 1.

    .. code-block:: sql

        SELECT MIN(left) FROM tree

    """
    table = self.model
    one = self.session.query(func.min(table.left)).scalar()
    self.assertEqual(one, 1)
def news():
    raterlist = db.session.execute(
        '''select rater.name, count(rating) as ratings
           from rater join rating on rater.user_id=rating.user_id
           group by rater.name''').fetchall()

    # restaurants
    restaurants = db.session().query(Restaurant, Rating, Location).\
        filter(func.extract('year', Rating.date) == '2015').\
        filter(Location.business_id == Restaurant.business_id)\
        .limit(5).all()
    restaurant, _, location = zip(*restaurants)

    # raters
    raters = db.session().query(Rater, func.min(Rating.mood), Location, Restaurant).filter(
        func.upper(Rater.name).like('Peter'.upper())
        # since 0 is Staff in our description
    ).filter(Rating.business_id == Location.business_id).filter(
        Restaurant.business_id == Rating.business_id).group_by(
        Rater.user_id, Rating.mood, Location.location_id,
        Restaurant.business_id, Rating.date).order_by(
        asc(Rating.mood), func.DATE(Rating.date)).limit(20).all()

    # rest_spec
    rest_spec = raters[len(raters) - 1][3]
    other_critics = db.session.query(Restaurant, Rating.date, Rating.mood).filter(
        Restaurant.business_id == rest_spec.business_id).group_by(
        Restaurant.business_id, Rating.id, Rating.date).order_by(Rating.date).limit(10).all()

    Best_cate = db.session.execute('''
        select rating,food_cate.cate_type from rating
        join (
            select unnest(restaurant.food_type) as cate_type,
                   restaurant.business_id as b_id
            from restaurant
        ) as food_cate on rating.business_id = food_cate.b_id
        where food_cate.cate_type='{}'
        group by (food_cate.cate_type, rating.mood, rating.*)
        having (rating.mood >= avg(rating.mood))
        order by rating.mood desc
    '''.format("Active Life")).fetchall()

    os.system("clear")
    return render_template('news.html', restaurant=restaurant[0],
                           location=location[0], johns=raters[0],
                           others=other_critics)
def dashboard_view():
    object_list = session.query(
        Request.route,
        Request.method,
        func.avg(Request.duration).label('time_avg'),
        func.min(Request.duration).label('time_min'),
        func.max(Request.duration).label('time_max'),
        func.count(Request.id).label('count'),
    ).group_by(Request.route, Request.method)
    return render_template('profiler_index.html', object_list=object_list.all())
def factor(self):
    baseline = self.career_task.bonus_baseline
    if baseline is None:
        baseline = db.session.query(func.min(TaskReading.bonus)).filter_by(
            career_task_id=self.career_task_id).first()
        if baseline is None:
            return None
        baseline = baseline[0]
        self.career_task.bonus_baseline = baseline
        db.session.add(self.career_task)
    return self.bonus / baseline
def index():
    # It is pretty easy for some tasks:
    people = Person.query.all()
    by_name = Person.query.filter_by(name='Sveta').first()
    by_age = Person.query.filter(Person.age >= 30)
    by_job = Person.query.filter(Person.job == 'HR')

    # And not so easy for others:
    sub = db.session.query(func.min(Person.age).label('min_age')).subquery()
    youngest = Person.query.join(sub, sub.c.min_age == Person.age).first()

    return jsonify([p.to_dict() for p in people])
def delayed_jobs(self):
    """ Get Delayed Jobs """
    query = Job.query.filter(Job.owner == self.user)\
        .join(Job.recipesets)\
        .filter(and_(RecipeSet.queue_time <= (datetime.utcnow() - timedelta(days=self.delayed_job_age)),
                     RecipeSet.status == TaskStatus.queued))\
        .group_by(Job.id)\
        .values(func.min(RecipeSet.queue_time), Job.id)

    return [(queue_time, absolute_url('/jobs/%s' % job_id))
            for queue_time, job_id in query]
def campaigns_overall():
    start = request.values.get('start')
    end = request.values.get('end')
    timespan = request.values.get('timespan', 'day')

    if timespan not in API_TIMESPANS.keys():
        abort(400, 'timespan should be one of %s' % ','.join(API_TIMESPANS))
    else:
        timespan_strf, timespan_to_char = API_TIMESPANS[timespan]

    timestamp_to_char = func.to_char(Call.timestamp, timespan_to_char).label(timespan)

    query = (db.session.query(
        func.min(Call.timestamp.label('date')),
        Call.campaign_id,
        timestamp_to_char,
        func.count(distinct(Call.id)).label('calls_count'))
        .group_by(Call.campaign_id)
        .group_by(timestamp_to_char)
        .order_by(timespan))

    completed_query = db.session.query(Call.timestamp, Call.id).filter_by(status='completed')

    if start:
        try:
            startDate = dateutil.parser.parse(start)
        except ValueError:
            abort(400, 'start should be in isostring format')
        query = query.filter(Call.timestamp >= startDate)
        completed_query = completed_query.filter(Call.timestamp >= startDate)

    if end:
        try:
            endDate = dateutil.parser.parse(end)
            if start:
                if endDate < startDate:
                    abort(400, 'end should be after start')
                if endDate == startDate:
                    endDate = startDate + timedelta(days=1)
        except ValueError:
            abort(400, 'end should be in isostring format')
        query = query.filter(Call.timestamp <= endDate)
        completed_query = completed_query.filter(Call.timestamp <= endDate)

    dates = defaultdict(dict)
    for (date, campaign_id, timespan, count) in query.all():
        date_string = date.strftime(timespan_strf)
        dates[date_string][int(campaign_id)] = count

    sorted_dates = OrderedDict(sorted(dates.items()))
    meta = {'calls_completed': completed_query.count()}
    return jsonify({'meta': meta, 'objects': sorted_dates})
def get_average_rates(base_currency_code):
    """
    URL examples:
    exchange_rates/EUR/average?currency_codes=USD,AUD
    exchange_rates/EUR?currency_codes=USD,AUD&start_date=2018-01-31
    exchange_rates/EUR?currency_codes=USD,AUD&start_date=2018-01-31&end_date=2018-02-15
    """
    validation_error = Check(request.args).validate()
    if validation_error is not None:
        return make_response(validation_error, 422)

    result = []
    currency_codes = request.args.get('currency_codes')
    start_date = request.args.get('start_date')
    end_date = request.args.get('end_date')

    exchange_rates = Datapoint.query.filter(Datapoint.base_currency_code == base_currency_code)

    if currency_codes is not None:
        currency_codes = currency_codes.split(',')
        exchange_rates = exchange_rates.filter(Datapoint.currency_code.in_(currency_codes))

    if start_date is None:
        # Then we need to select minimal possible date
        start_date = exchange_rates.with_entities(func.min(Datapoint.date).label('min_date')).first()
        start_date = start_date.min_date
    else:
        start_date = datetime.strptime(start_date, "%Y-%m-%d")
    exchange_rates = exchange_rates.filter(Datapoint.date >= start_date)

    if end_date is None:
        # Then we need to select maximal possible date
        end_date = exchange_rates.with_entities(func.max(Datapoint.date).label('max_date')).first()
        end_date = end_date.max_date
    else:
        end_date = datetime.strptime(end_date, "%Y-%m-%d")
    exchange_rates = exchange_rates.filter(Datapoint.date <= end_date)

    exchange_rates = exchange_rates.\
        group_by(Datapoint.currency_code).\
        with_entities(Datapoint.currency_code, func.avg(Datapoint.rate).label('average')).\
        all()

    for exchange_rate in exchange_rates:
        result.append({
            'base_currency_code': base_currency_code,
            'currency_code': exchange_rate.currency_code,
            'average_rate': exchange_rate.average,
            'start_date': datetime.strftime(start_date, "%Y-%m-%d"),
            'end_date': datetime.strftime(end_date, "%Y-%m-%d")
        })

    return jsonify(result)
def plot_sampled(engine: sqlalchemy.engine.Connectable,
                 t_hex: sqlalchemy.Table) -> None:
    d = sqlalchemy.select([
        t_hex.c.vlongitude,
        t_hex.c.vlatitude,
        func.min(t_dist.c.distance),
        t_hex.c.habitable
    ]).where(t_hex.c.hexbin == t_dist.c.hexbin1).where(
        t_dist.c.distance > 0).group_by(t_hex.c.hexbin)
    x, y, s, h = zip(*engine.execute(d))
    s = numpy.array(s)
    h = numpy.array(h)
    plt.scatter(x, y, marker='o', s=s / 3600, alpha=0.3, c='r')
    er = ecoregion_tile_from_geocoordinates(-165, 60).read(1)
    plt.imshow(-TC[er], extent=(-180, -150, 50, 70))
def nearest_by_birthday_athelete(user_birthdate, session):
    """
    The nearest athlete is found by the DBMS itself: the record with the
    smallest absolute difference between the athlete's birthdate and the
    user's birthdate is selected.
    """
    query = session.query(
        Athelete.name,
        Athelete.birthdate,
        func.min(
            func.abs(
                func.julianday(Athelete.birthdate) -
                func.julianday(user_birthdate)))).first()
    return query
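# A self-contained sketch (not from the original code) of the same "nearest birthday"
# pattern against an in-memory SQLite database. The Athelete model below is an assumption
# made for illustration and must live in the same module as nearest_by_birthday_athelete()
# for the call to work. Note that func.julianday() is SQLite-specific, and that selecting
# bare columns alongside min() relies on SQLite returning the row that produced the minimum.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Athelete(Base):
    __tablename__ = "athelete"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    birthdate = Column(String)  # ISO date strings work with julianday()

def demo_nearest_athlete():
    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add_all([Athelete(name="A", birthdate="1990-01-01"),
                         Athelete(name="B", birthdate="1985-06-15")])
        session.commit()
        return nearest_by_birthday_athelete("1991-03-03", session)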
def histogram(cls, resolution=YEARLY, date_range=None, date_attrname='date', session=None):
    """ Group by year month day hour """
    s = session or Session(dburi)
    count, mindate, maxdate = (
        s.query(
            func.count('*'),
            func.min('date'),
            func.max('date'),
        )
        .first()
    )
    dateattr = date_attrname
    query = (
        s.query(
            func.count('*').label('row_count')
        )
        .order_by(dateattr)
    )
    if date_range:
        start, end = date_range
        query = (query
                 .filter(dateattr >= start)
                 .filter(dateattr <= end))

    if resolution == YEARLY:
        bin_generator = yearly_bins(mindate.year, maxdate.year)
    elif resolution == DAILY:
        bin_generator = daily_bins(mindate, maxdate)
    elif resolution == HOURLY:
        bin_generator = hourly_bins(mindate, maxdate)
    elif resolution == MINUTELY:
        bin_generator = minutely_bins(mindate, maxdate, minutes=15)

    for label, start, end in bin_generator:
        yield (label,
               (query
                .filter(dateattr >= start)
                .filter(dateattr <= end)
                .first()))
    # TODO:
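# The bin generators referenced above (yearly_bins, daily_bins, hourly_bins, minutely_bins)
# are not shown in this snippet. A minimal sketch of what yearly_bins might look like,
# assuming each bin is a (label, start, end) tuple as consumed by the loop above:
from datetime import datetime

def yearly_bins(first_year, last_year):
    """Yield one (label, start, end) tuple per calendar year, inclusive."""
    for year in range(first_year, last_year + 1):
        yield (str(year),
               datetime(year, 1, 1),
               datetime(year, 12, 31, 23, 59, 59))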
def get(self):
    from_ts = dateutil.parser.parse(request.args.get('from_ts'))
    to_ts = dateutil.parser.parse(request.args.get('to_ts'))

    result = WeatherMetric.query \
        .with_entities(
            func.avg(WeatherMetric.temperature).label('avg-temperature'),
            func.min(WeatherMetric.temperature).label('min-temperature'),
            func.max(WeatherMetric.temperature).label('max-temperature'),
            func.avg(WeatherMetric.humidity).label('avg-humidity'),
            func.min(WeatherMetric.humidity).label('min-humidity'),
            func.max(WeatherMetric.humidity).label('max-humidity'),
            func.avg(WeatherMetric.precipitation).label('avg-precipitation'),
            func.min(WeatherMetric.precipitation).label('min-precipitation'),
            func.max(WeatherMetric.precipitation).label('max-precipitation'),
            func.avg(WeatherMetric.wind_speed).label('avg-wind-speed'),
            func.min(WeatherMetric.wind_speed).label('min-wind-speed'),
            func.max(WeatherMetric.wind_speed).label('max-wind-speed')) \
        .filter(WeatherMetric.collection_ts >= from_ts) \
        .filter(WeatherMetric.collection_ts <= to_ts) \
        .first()

    # quick and dirty
    # must be a better way to get dict from query
    summary = {
        'avg-temperature': float(result[0]),
        'min-temperature': result[1],
        'max-temperature': result[2],
        'avg-humidity': float(result[3]),
        'min-humidity': result[4],
        'max-humidity': result[5],
        'avg-precipitation': float(result[6]),
        'min-precipitation': result[7],
        'max-precipitation': result[8],
        'avg-wind_speed': float(result[9]),
        'min-wind_speed': result[10],
        'max-wind_speed': result[11]
    }
    return jsonify(summary)
def test_populate_awstags_summary_table(self):
    """Test that the AWS tags summary table is populated."""
    bill_ids = []
    ce_table_name = AWS_CUR_TABLE_MAP['cost_entry']
    tags_summary_name = AWS_CUR_TABLE_MAP['tags_summary']

    ce_table = getattr(self.accessor.report_schema, ce_table_name)

    today = DateAccessor().today_with_timezone('UTC')
    last_month = today - relativedelta.relativedelta(months=1)

    for cost_entry_date in (today, last_month):
        bill = self.creator.create_cost_entry_bill(cost_entry_date)
        bill_ids.append(str(bill.id))
        cost_entry = self.creator.create_cost_entry(bill, cost_entry_date)
        for family in ['Storage', 'Compute Instance',
                       'Database Storage', 'Database Instance']:
            product = self.creator.create_cost_entry_product(family)
            pricing = self.creator.create_cost_entry_pricing()
            reservation = self.creator.create_cost_entry_reservation()
            self.creator.create_cost_entry_line_item(
                bill, cost_entry, product, pricing, reservation)

    start_date, end_date = self.accessor._session.query(
        func.min(ce_table.interval_start),
        func.max(ce_table.interval_start)).first()

    query = self.accessor._get_db_obj_query(tags_summary_name)
    initial_count = query.count()

    self.accessor.populate_line_item_daily_table(start_date, end_date, bill_ids)
    self.accessor.populate_line_item_daily_summary_table(start_date, end_date, bill_ids)
    self.accessor.populate_tags_summary_table()

    self.assertNotEqual(query.count(), initial_count)

    tags = query.all()
    tag_keys = [tag.key for tag in tags]

    self.accessor._cursor.execute(
        """SELECT DISTINCT jsonb_object_keys(tags) FROM reporting_awscostentrylineitem_daily""")
    expected_tag_keys = self.accessor._cursor.fetchall()
    expected_tag_keys = [tag[0] for tag in expected_tag_keys]

    self.assertEqual(sorted(tag_keys), sorted(expected_tag_keys))
def get_database_days(start, end):
    """Returns the first and the last day in aircraft_beacons table."""
    if start is None and end is None:
        days_from_db = db.session.query(
            func.min(AircraftBeacon.timestamp).label("first_day"),
            func.max(AircraftBeacon.timestamp).label("last_day")).one()
        start = days_from_db[0].date()
        end = days_from_db[1].date()
    else:
        start = datetime.strptime(start, "%Y-%m-%d").date()
        end = datetime.strptime(end, "%Y-%m-%d").date()

    days = get_days(start, end)
    return days
def daily_query_1(gage, dry_value):
    """Query for daily average data for a single gage.

    Result will have these columns:
    date, averaged data (or None), flag, min, max, count
    """
    query_by_day, _ = daily_base_query()
    flag, val, raw = daily_columns(gage, dry_value)
    query_by_day = query_by_day.column(val.label('average'))
    query_by_day = query_by_day.column(flag.label('flag'))
    # just to verify that we are really grouping
    query_by_day = query_by_day.column(func.min(raw).label("min"))
    query_by_day = query_by_day.column(func.max(raw).label("max"))
    query_by_day = query_by_day.column(func.count(1).label("count"))
    return query_by_day
def test_populate_line_item_aggregates_table(self):
    """Test that the aggregates table is populated."""
    ce_table_name = AWS_CUR_TABLE_MAP['cost_entry']
    agg_table_name = AWS_CUR_TABLE_MAP['line_item_aggregates']

    ce_table = getattr(self.accessor.report_schema, ce_table_name)
    agg_table = getattr(self.accessor.report_schema, agg_table_name)

    expected_time_scope_values = [-1, -2, -10]
    expected_report_types = ['storage', 'instance_type', 'costs']

    for _ in range(25):
        bill = self.creator.create_cost_entry_bill()
        cost_entry = self.creator.create_cost_entry(bill)
        product = self.creator.create_cost_entry_product()
        pricing = self.creator.create_cost_entry_pricing()
        reservation = self.creator.create_cost_entry_reservation()
        self.creator.create_cost_entry_line_item(bill, cost_entry, product,
                                                 pricing, reservation)

    start_date, end_date = self.accessor._session.query(
        func.min(ce_table.interval_start),
        func.max(ce_table.interval_start)).first()

    query = self.accessor._get_db_obj_query(agg_table_name)
    initial_count = query.count()

    self.accessor.populate_line_item_daily_table(start_date, end_date)
    self.accessor.populate_line_item_daily_summary_table(start_date, end_date)
    self.accessor.populate_line_item_aggregate_table()

    self.assertNotEqual(query.count(), initial_count)

    time_scope_values = self.accessor._session\
        .query(agg_table.time_scope_value)\
        .group_by(agg_table.time_scope_value)\
        .all()
    time_scope_values = [val[0] for val in time_scope_values]

    report_types = self.accessor._session\
        .query(agg_table.report_type)\
        .group_by(agg_table.report_type)\
        .all()
    report_types = [val[0] for val in report_types]

    for val in expected_time_scope_values:
        self.assertIn(val, time_scope_values)
    for report in expected_report_types:
        self.assertIn(report, report_types)
def other_oprate():
    ret = session.query(Users).filter_by(name='alex').all()
    ret = session.query(Users).filter(Users.id > 1, Users.name == 'eric').all()
    ret = session.query(Users).filter(Users.id.between(1, 3), Users.name == 'eric').all()
    ret = session.query(Users).filter(Users.id.in_([1, 3, 4])).all()
    ret = session.query(Users).filter(~Users.id.in_([1, 3, 4])).all()
    ret = session.query(Users).filter(
        Users.id.in_(session.query(Users.id).filter_by(name='eric'))).all()

    ret = session.query(Users).filter(and_(Users.id > 3, Users.name == 'eric')).all()
    ret = session.query(Users).filter(or_(Users.id < 2, Users.name == 'eric')).all()
    ret = session.query(Users).filter(
        or_(Users.id < 2, and_(Users.name == 'eric', Users.id > 3), Users.extra != "")).all()
    print('and_ and or_:', ret)

    ret = session.query(Users).filter(Users.name.like('e%')).all()
    print('%:', ret)

    ret = session.query(Users).order_by(Users.name.desc(), Users.id.asc()).all()
    print('order:', ret)

    ret = session.query(func.max(Users.id), func.sum(Users.id), func.min(Users.id)).group_by(
        Users.name).having(func.min(Users.id) > 2).all()
    print('group:', ret)

    ret = session.query(Person).join(Favor, isouter=True).all()
    print('join:', ret)

    q1 = session.query(Users.name).filter(Users.id > 2)
    q2 = session.query(Favor.caption).filter(Favor.nid < 2)
    ret = q1.union_all(q2).all()
    print('union:', ret)
def _get_top_users_best_run_query(self, competition: Competition, sex: Sex, age: Age):
    """
    Returns query for top users in the best run activity in a specified competition.

    :param competition: Competition where we want top users for the best run.
    :param sex: Sex of users for the top users list.
    :param age: Age category of users for the top users list.
    :returns: Subquery returning User and Activity for user's best run.
    """
    best_times = db.session.query(Activity.user_id.label('user_id'),
                                  func.min(Activity.average_duration_per_km).label('best_time')). \
        filter(func.date(Activity.datetime) >= self.SEASON.start_date,
               func.date(Activity.datetime) <= self.SEASON.end_date,
               Activity.type == ActivityType.Run,
               Activity.distance >= competition.value). \
        group_by(Activity.user_id). \
        subquery(with_labels=True)

    first_best_times = db.session.query(Activity.user_id.label('user_id'),
                                        func.min(Activity.id).label('id')). \
        select_from(Activity). \
        join(best_times, db.and_(Activity.user_id == best_times.c.user_id,
                                 Activity.average_duration_per_km == best_times.c.best_time)). \
        filter(func.date(Activity.datetime) >= self.SEASON.start_date,
               func.date(Activity.datetime) <= self.SEASON.end_date,
               Activity.type == ActivityType.Run,
               Activity.distance >= competition.value). \
        group_by(Activity.user_id). \
        subquery(with_labels=True)

    return db.session.query(User, Activity). \
        select_from(User). \
        join(first_best_times, User.id == first_best_times.c.user_id). \
        join(Activity, db.and_(Activity.user_id == first_best_times.c.user_id,
                               Activity.id == first_best_times.c.id)). \
        filter(User.sex == sex, User.age == age, User.competing, User.verified). \
        order_by(Activity.average_duration_per_km.asc())
def test_populate_line_item_daily_summary_table(self):
    """Test that the daily summary table is populated."""
    ce_table_name = AWS_CUR_TABLE_MAP['cost_entry']
    summary_table_name = AWS_CUR_TABLE_MAP['line_item_daily_summary']

    ce_table = getattr(self.accessor.report_schema, ce_table_name)
    summary_table = getattr(self.accessor.report_schema, summary_table_name)

    for _ in range(10):
        bill = self.creator.create_cost_entry_bill()
        cost_entry = self.creator.create_cost_entry(bill)
        product = self.creator.create_cost_entry_product()
        pricing = self.creator.create_cost_entry_pricing()
        reservation = self.creator.create_cost_entry_reservation()
        self.creator.create_cost_entry_line_item(bill, cost_entry, product,
                                                 pricing, reservation)

    start_date, end_date = self.accessor._session.query(
        func.min(ce_table.interval_start),
        func.max(ce_table.interval_start)).first()

    query = self.accessor._get_db_obj_query(summary_table_name)
    initial_count = query.count()

    self.accessor.populate_line_item_daily_table(start_date, end_date)
    self.accessor.populate_line_item_daily_summary_table(start_date, end_date)

    self.assertNotEqual(query.count(), initial_count)

    result_start_date, result_end_date = self.accessor._session.query(
        func.min(summary_table.usage_start),
        func.max(summary_table.usage_start)).first()

    self.assertEqual(result_start_date, start_date.date())
    self.assertEqual(result_end_date, end_date.date())
def tbl_years():
    years = {}
    for tbl in registered_models:
        tbl_name = table_name(tbl)
        if hasattr(tbl, "year"):
            qry = tbl.query.with_entities(
                func.max(tbl.year).label("max_year"),
                func.min(tbl.year).label("min_year"),
            )
            res = qry.one()
            years[tbl_name] = {consts.LATEST: res.max_year, consts.OLDEST: res.min_year}
        else:
            years[tbl_name] = None
    return years